code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import torch
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda


def _one_hot(label):
    """Return a length-10 float tensor that is 1 at index `label`, 0 elsewhere."""
    encoded = torch.zeros(10, dtype=torch.float)
    return encoded.scatter_(dim=0, index=torch.tensor(label), value=1)


# FashionMNIST with images converted to tensors and integer class labels
# replaced by their one-hot encoding (10 classes).
ds = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
    target_transform=Lambda(_one_hot),
)
# -

# Standalone copy of the same label transform (identical to the one used above).
target_transform = Lambda(_one_hot)
docs/_build/.jupyter_cache/executed/753788cf6fad1a7767c3aa962fa58661/base.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Statistics Review
#
# From the pdf: https://risk-engineering.org/static/PDF/slides-stat-modelling.pdf

import numpy as np
import matplotlib.pyplot as plt

# +
# A noisy sine curve: nobs samples of sin(x) on [0, 10] plus uniform noise
# drawn from [-0.1, 0.1].
nobs = 100
X = np.linspace(0, 10, nobs)
obs = np.sin(X) + np.random.uniform(-0.1, 0.1, nobs)
plt.plot(obs)
# -

# ### Probability mass function (PMF)
# * For discrete data
# * Finds all the values that a random value might take

# <p style="text-align: center;font-weight: 900;font-size: 24px;">Coin toss (two coins)</p>

# +
n_tosses = 2
res = np.random.randint(0, 2, n_tosses)
print(res)

# If heads == 1 and tails == 0, get number of heads in sequence.
n_heads = res.sum()
print(f"Number of heads results for {n_tosses} tosses: {n_heads}")
# -

# #### Larger scale

# Repeat the two-coin toss n_epochs times and record the head count of each
# trial; the bincount plot approximates the PMF of "heads in two tosses".
n_epochs = 1000
heads = np.zeros(n_epochs, dtype=np.uint32)
for i in range(n_epochs):
    heads[i] = np.random.randint(0, 2, 2).sum()

# NOTE: `use_line_collection=True` was dropped here — the keyword was
# deprecated in Matplotlib 3.6 and removed in 3.8 (its behaviour has been
# the default for years), so passing it now raises a TypeError.
plt.stem(np.bincount(heads))
notebook-samples/ds-stats/statistics review.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.2 (''venv'': venv)'
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import pandas as pd
import os
import pickle  # FIX: use the public `pickle` module, not the private `_pickle`
import joblib
# -

data = pd.read_csv('../data/usapl_data.csv')
data

data.dtypes

data.isnull().sum()

# Report columns with a large number of missing values.
for column in data.columns:
    if data[column].isnull().sum() > 30000:
        print(f'{column}: {data[column].isnull().sum()} null values')

# FIX: .copy() so the in-place drop below mutates an independent frame rather
# than a view of `data` (avoids pandas' SettingWithCopyWarning / silent no-op).
raw_lifters = data[data['Equipment'] == 'Raw'].copy()
raw_lifters

raw_lifters.isnull().sum()

# Can drop these columns, too many nan values
for column in raw_lifters.columns:
    if raw_lifters[column].isnull().sum() > 30000:
        print(column)

raw_lifters.drop(['Squat4Kg', 'Bench4Kg', 'Deadlift4Kg', 'MeetTown'],
                 axis=1, inplace=True)

clean = raw_lifters.dropna()

# Completely clean dataset
clean.isnull().sum()

clean.sort_values(['AgeClass', 'Age'], inplace=True)
clean

# +
# clean.to_csv('../data/no_null_vals.csv', index=False)
# -

clean.dtypes

# +
# Remove Mx
clean = clean[clean['Sex'] != 'Mx']
clean['AgeClass'].unique()
# 18-19 through 35-39 are the important categories
# -

clean.groupby('AgeClass').agg('count')

ages = ['18-19', '20-23', '24-34', '35-39']
clean_ages = clean[clean.AgeClass.isin(ages)]  # FIX: redundant `== True` removed

important_cols = ['Sex', 'Age', 'AgeClass', 'BodyweightKg',
                  'WeightClassKg', 'Best3SquatKg', 'Best3BenchKg',
                  'Best3DeadliftKg', 'TotalKg', 'Country', 'Date']
clean_ages = clean_ages[important_cols]

clean_ages['TotalKg'].hist()

sns.countplot(x='AgeClass', data=clean_ages)
# Most of the data is concentrated between the 18-19 and 35-39 age ranges

sns.countplot(x='Sex', data=clean_ages)

clean_ages['Sex'].value_counts()

clean = clean[important_cols]
clean

plt.figure(figsize=(9, 6))
sns.histplot(data=clean['TotalKg'])

sns.displot(data=clean, x='Age', y='TotalKg')

sns.relplot(data=clean, x='Age', y='TotalKg', hue='Sex', col='Sex')

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1 = sns.histplot(data=clean['TotalKg'])
ax2 = sns.histplot(data=clean['BodyweightKg'])

clean_ages

clean_ages[clean_ages['Sex'] == 'M']['WeightClassKg'].unique()


def encode_and_bind(original_dataframe, feature_to_encode):
    """One-hot encode `feature_to_encode` and splice the dummy columns into a
    copy of `original_dataframe`, dropping the original categorical column.

    Returns a new DataFrame; `original_dataframe` is not modified.
    """
    dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
    res = pd.concat([original_dataframe, dummies], axis=1)
    res = res.drop([feature_to_encode], axis=1)
    return res  # FIX: `return(res)` — parentheses are not a call here


# +
'''
USAPL RECENTLY UPDATED WEIGHT CLASSES, WE CAN DROP THAT BECAUSE BODYWEIGHT ACCOUNTS FOR IT ALREADY
'''

# d_features = clean_ages[['Sex', 'Age', 'BodyweightKg', 'Best3SquatKg', 'Best3BenchKg']]
# d_target = clean_ages['Best3DeadliftKg']

# b_features = clean_ages[['Sex', 'Age', 'BodyweightKg', 'Best3SquatKg', 'Best3DeadliftKg']]
# b_target = clean_ages['Best3BenchKg']

# s_features = clean_ages[['Sex', 'Age', 'BodyweightKg', 'Best3DeadliftKg', 'Best3BenchKg']]
# s_target = clean_ages['Best3SquatKg']

# One (features, target) pair per lift: predict each lift from the other two
# plus sex, age and bodyweight.
d_features = clean[['Sex', 'Age', 'BodyweightKg', 'Best3BenchKg', 'Best3SquatKg']]
d_target = clean['Best3DeadliftKg']

b_features = clean[['Sex', 'Age', 'BodyweightKg', 'Best3SquatKg', 'Best3DeadliftKg']]
b_target = clean['Best3BenchKg']

s_features = clean[['Sex', 'Age', 'BodyweightKg', 'Best3BenchKg', 'Best3DeadliftKg']]
s_target = clean['Best3SquatKg']

# +
# clean.to_csv('model_training_data', index=False)
# -

b_features


def train_test_scaled_split(features, target):
    """Encode categorical columns, split into train/test and min-max scale.

    The scaler is fitted on the training split only; any NaN produced by the
    scaling is replaced with 0. Returns
    (X_train_scaled, X_test_scaled, y_train, y_test).
    """
    features_to_encode = ['Sex']
    for feature in features_to_encode:
        features = encode_and_bind(features, feature)

    # split data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(features, target,
                                                        random_state=3000)
    # Create the scaler
    scaler = MinMaxScaler()
    # Fit the scaler to the training data (features only)
    scaler.fit(X_train)
    # Transform X_train and X_test based on the (same) scaler
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    # Replace any potential NaN with 0
    X_train_scaled[np.isnan(X_train_scaled)] = 0
    X_test_scaled[np.isnan(X_test_scaled)] = 0
    return X_train_scaled, X_test_scaled, y_train, y_test


# +
# BUG FIX: this cell previously used the names `features`/`target`, which are
# only defined inside the functions above — running the notebook top-to-bottom
# raised a NameError. Bind them explicitly (deadlift task) before the manual
# split/scale walk-through below.
features, target = d_features, d_target
features = encode_and_bind(features, 'Sex')

# split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features, target,
                                                    random_state=3000)

# Create the scaler
scaler = MinMaxScaler()

# Fit the scaler to the training data(features only)
scaler.fit(X_train)

# Transform X_train and X_test based on the (same) scaler
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Replace any potential NaN with 0
X_train_scaled[np.isnan(X_train_scaled)] = 0
X_test_scaled[np.isnan(X_test_scaled)] = 0
# -

pred_map = {'squat': [s_features, s_target],
            'bench': [b_features, b_target],
            'deadlift': [d_features, d_target]}

best_models = {}
scalers = {}

# FIX: loop variable renamed from `data` to `lift_data` so it no longer
# shadows the raw DataFrame loaded at the top of the notebook.
for lift, lift_data in pred_map.items():
    features, target = lift_data
    X_train_scaled, X_test_scaled, y_train, y_test = train_test_scaled_split(features, target)
    model = RandomForestRegressor().fit(X=X_train_scaled, y=y_train)

    # Prediction results
    print(lift + ":\n")
    print("\tR-squared value for training set: ",
          r2_score(y_train, model.predict(X_train_scaled)))
    print("\tMean-squared-error value for training set: ",
          mean_squared_error(y_train, model.predict(X_train_scaled)))
    print("\n")
    print("\tR-squared value for testing set: ",
          r2_score(y_test, model.predict(X_test_scaled)))
    print("\tMean-squared-error value for testing set: ",
          mean_squared_error(y_test, model.predict(X_test_scaled)))
    print("\n")

    best_models[lift] = model

best_models

# +
# Re-fit one MinMaxScaler per lift (on the training split only) so each
# exported model ships with the scaler that matches its feature layout.
for lift, lift_data in pred_map.items():
    features, target = lift_data
    features_to_encode = ['Sex']
    for feature in features_to_encode:
        features = encode_and_bind(features, feature)

    # split data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(features, target,
                                                        random_state=3000)
    # Create the scaler
    scaler = MinMaxScaler()
    # Fit the scaler to the training data(features only)
    scaler.fit(X_train)
    scalers[lift] = scaler
# -

scalers

# +
# Export models and scalers
for lift, model in best_models.items():
    with open(f'{lift}_model.pickle', 'wb') as output_file:
        pickle.dump(model, output_file)

for lift, scaler in scalers.items():
    scaler_filename = f'{lift}_scaler'
    joblib.dump(scaler, scaler_filename)
# -

best_param_models = {}

# +
# scoring = {"Max Error": "max_error", "R-squared": "r2"}

# for lift, lift_data in pred_map.items():
#     features, target = lift_data
#     X_train_scaled, X_test_scaled, y_train, y_test = train_test_scaled_split(features, target)
#     param_grid = {"max_depth":[3, 5, 7, 9, 11]}
#     grid_search = GridSearchCV(RandomForestRegressor(), param_grid, scoring=scoring,
#                                refit='R-squared', return_train_score=True, cv=5)
#     # Fit the grid search object on the training data (CV will be performed on this)
#     grid_search.fit(X=X_train_scaled, y=y_train)
#     # Grid search results
#     print(lift + ":\n")
#     print("\tBest estimator: ", grid_search.best_estimator_)
#     print("\tBest parameters: ", grid_search.best_params_)
#     print("\tBest cross-validation score: ", grid_search.best_score_)
#     print("\n")
#     model = grid_search.best_estimator_
#     print("\tR-squared value for training set: ", r2_score(y_train, model.predict(X_train_scaled)))
#     print("\tMean-squared-error value for training set: ", mean_squared_error(y_train, model.predict(X_train_scaled)))
#     print("\n")
#     # Add the best model to dictionary
#     # FIX(comment): was `best_param_models[estimator_name]` — `estimator_name`
#     # is undefined in this loop; the key should be `lift`.
#     best_param_models[lift] = grid_search.best_estimator_
# -

# +
# Parameter grids for Validation/Optimization
ridge_param_grid = {"alpha": [0.001, 0.01, 0.1, 1, 10, 100]}
lasso_param_grid = {"alpha": [0.001, 0.01, 0.1, 1, 10, 100]}
knn_param_grid = {"n_neighbors": [1, 5, 10],
                  "metric": ['euclidean', 'manhattan', 'minkowski']}
tree_param_grid = {"max_depth": [3, 5, 7, 9, 11]}
forest_param_grid = {"max_depth": [3, 5, 7, 9, 11]}

# Dictionary of models with their parameter grids
estimators = {
    'Ridge': [Ridge(), ridge_param_grid],
    'Lasso': [Lasso(), lasso_param_grid],
    'k-Nearest Neighbor': [KNeighborsRegressor(), knn_param_grid],
    'Decision Tree': [DecisionTreeRegressor(), tree_param_grid],
    'Random Forest': [RandomForestRegressor(), forest_param_grid]}
# -

# +
# Initial Model Performance Analysis
# (uses the X_train_scaled/y_train etc. left over from the cells above)
print("Initial Results for Models Trained on All Features\n")

for estimator_name, estimator_objects in estimators.items():
    estimator_model = estimator_objects[0]
    model = estimator_model.fit(X=X_train_scaled, y=y_train)

    # Prediction results
    print(estimator_name + ":\n")
    print("\tR-squared value for training set: ",
          r2_score(y_train, model.predict(X_train_scaled)))
    print("\tMean-squared-error value for training set: ",
          mean_squared_error(y_train, model.predict(X_train_scaled)))
    print("\n")
    print("\tR-squared value for testing set: ",
          r2_score(y_test, model.predict(X_test_scaled)))
    print("\tMean-squared-error value for testing set: ",
          mean_squared_error(y_test, model.predict(X_test_scaled)))
    print("\n")
# -

'''
FOR DEADLIFT
RANDOM FOREST IS BEST

Initial Results for Models Trained on All Features

Ridge:
    R-squared value for training set:  0.900791795329309
    Mean-squared-error value for training set:  310.66843606211154
    R-squared value for testing set:  0.8982009899083301
    Mean-squared-error value for testing set:  318.8348637482929

Lasso:
    R-squared value for training set:  0.8604311108121666
    Mean-squared-error value for training set:  437.057082837424
    R-squared value for testing set:  0.8591595054946677
    Mean-squared-error value for testing set:  441.11293258562176

k-Nearest Neighbor:
    R-squared value for training set:  0.9268768144787918
    Mean-squared-error value for training set:  228.9837394110679
    R-squared value for testing set:  0.885514033615932
    Mean-squared-error value for testing set:  358.57045623808995

Decision Tree:
    R-squared value for training set:  0.9997456336932952
    Mean-squared-error value for training set:  0.796542815719662
    R-squared value for testing set:  0.8247594118265117
    Mean-squared-error value for testing set:  548.8541490054909

Random Forest:
    R-squared value for training set:  0.9867339096710506
    Mean-squared-error value for training set:  41.54248682186873
    R-squared value for testing set:  0.9029074378444212
    Mean-squared-error value for testing set:  304.09425197720844

WITHOUT CLASSES

Ridge:
    R-squared value for training set:  0.898504968343617
    Mean-squared-error value for training set:  317.8295873554723
    R-squared value for testing set:  0.8962298550449012
    Mean-squared-error value for testing set:  325.0084651914204

Lasso:
    R-squared value for training set:  0.8604159502657829
    Mean-squared-error value for training set:  437.10455777410425
    R-squared value for testing set:  0.8591452403723664
    Mean-squared-error value for testing set:  441.1576109996967

k-Nearest Neighbor:
    R-squared value for training set:  0.9281983657588784
    Mean-squared-error value for training set:  224.84532897693873
    R-squared value for testing set:  0.8878280908548475
    Mean-squared-error value for testing set:  351.32282068828425

Decision Tree:
    R-squared value for training set:  0.9997418966085204
    Mean-squared-error value for training set:  0.808245419211549
    R-squared value for testing set:  0.8250143648717643
    Mean-squared-error value for testing set:  548.0556351557776

Random Forest:
    R-squared value for training set:  0.986720766316167
    Mean-squared-error value for training set:  41.58364496518781
    R-squared value for testing set:  0.9025839068349805
    Mean-squared-error value for testing set:  305.1075522560664
'''

'''
FOR BENCH

Ridge:
    R-squared value for training set:  0.892988866535197
    Mean-squared-error value for training set:  176.53306406644927
    R-squared value for testing set:  0.8964825991149635
    Mean-squared-error value for testing set:  170.39844784776005

Lasso:
    R-squared value for training set:  0.8145834294703707
    Mean-squared-error value for training set:  305.8761669415857
    R-squared value for testing set:  0.8166275930373708
    Mean-squared-error value for testing set:  301.8465809360993

k-Nearest Neighbor:
    R-squared value for training set:  0.9226421141450587
    Mean-squared-error value for training set:  127.61498899707571
    R-squared value for testing set:  0.8842375523053484
    Mean-squared-error value for testing set:  190.55483655480288

Decision Tree:
    R-squared value for training set:  0.9997902915333114
    Mean-squared-error value for training set:  0.3459497809858721
    R-squared value for testing set:  0.8164845794658392
    Mean-squared-error value for testing set:  302.0819934406811

Random Forest:
    R-squared value for training set:  0.9858845429595768
    Mean-squared-error value for training set:  23.285847008281845
    R-squared value for testing set:  0.9021000538156578
    Mean-squared-error value for testing set:  161.1516395462605

WITHOUT CLASSES

Ridge:
    R-squared value for training set:  0.8904680622199737
    Mean-squared-error value for training set:  180.69155949837258
    R-squared value for testing set:  0.8941704889775871
    Mean-squared-error value for testing set:  174.20437781985777

Lasso:
    R-squared value for training set:  0.8139401495175196
    Mean-squared-error value for training set:  306.9373666266323
    R-squared value for testing set:  0.8160193189895053
    Mean-squared-error value for testing set:  302.8478517633823

k-Nearest Neighbor:
    R-squared value for training set:  0.9230868882485617
    Mean-squared-error value for training set:  126.88125847047938
    R-squared value for testing set:  0.8861075022450645
    Mean-squared-error value for testing set:  187.47673988162134

Decision Tree:
    R-squared value for training set:  0.9997902564728233
    Mean-squared-error value for training set:  0.346007619223888
    R-squared value for testing set:  0.8173917138206432
    Mean-squared-error value for testing set:  300.58877312480723

Random Forest:
    R-squared value for training set:  0.9858982376597523
    Mean-squared-error value for training set:  23.26325527128061
    R-squared value for testing set:  0.9018881160223156
    Mean-squared-error value for testing set:  161.50050718317019
'''

'''
FOR SQUAT

Ridge:
    R-squared value for training set:  0.9105562760189563
    Mean-squared-error value for training set:  268.75260056053975
    R-squared value for testing set:  0.9104550067401644
    Mean-squared-error value for testing set:  267.1095685007973

Lasso:
    R-squared value for training set:  0.8794849681241365
    Mean-squared-error value for training set:  362.11292175333625
    R-squared value for testing set:  0.8809785596583386
    Mean-squared-error value for testing set:  355.036774415219

k-Nearest Neighbor:
    R-squared value for training set:  0.9340890035860671
    Mean-squared-error value for training set:  198.04353959518784
    R-squared value for testing set:  0.8993721303492088
    Mean-squared-error value for testing set:  300.1693993496946

Decision Tree:
    R-squared value for training set:  0.9998465488672468
    Mean-squared-error value for training set:  0.46107640816830153
    R-squared value for testing set:  0.842712904857357
    Mean-squared-error value for testing set:  469.1818781244988

Random Forest:
    R-squared value for training set:  0.98807146372382
    Mean-squared-error value for training set:  35.84181206256046
    R-squared value for testing set:  0.9142998788291369
    Mean-squared-error value for testing set:  255.6404501588471

WITHOUT CLASSES

Ridge:
    R-squared value for training set:  0.9100313257045812
    Mean-squared-error value for training set:  270.3299248922422
    R-squared value for testing set:  0.9101631812638482
    Mean-squared-error value for testing set:  267.980073642611

Lasso:
    R-squared value for training set:  0.879482610475747
    Mean-squared-error value for training set:  362.12000580694723
    R-squared value for testing set:  0.8809764546205674
    Mean-squared-error value for testing set:  355.04305366892487

k-Nearest Neighbor:
    R-squared value for training set:  0.9353216809041093
    Mean-squared-error value for training set:  194.33969968188046
    R-squared value for testing set:  0.9005989534753995
    Mean-squared-error value for testing set:  296.50982907184897

Decision Tree:
    R-squared value for training set:  0.9998460483885462
    Mean-squared-error value for training set:  0.4625802023567154
    R-squared value for testing set:  0.8425767179090207
    Mean-squared-error value for testing set:  469.5881190061077

Random Forest:
    R-squared value for training set:  0.9879984822578319
    Mean-squared-error value for training set:  36.06110032454263
    R-squared value for testing set:  0.9139984869229377
    Mean-squared-error value for testing set:  256.5394916248604
'''
notebooks/model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (base) # language: python # name: base # --- # + [markdown] id="b0XwE5j0kwF1" # ## Query Output Preprocessing # # The graph database Dgraph returns query results in JSON format. The queries consist of getting all `originated` and all `responded` connections of a specified host. The `query_handler` tool converts these JSON outputs to CSV files (two CSV files for each host with some IP address - one for each connection direction (`originated`, `responded`)). # # This Jupyter notebook is used to: # # 1. Compute the neighbourhoods of these hosts. *(For each connection, compute its neighbourhood which is given by connections in a given time interval.)* # 2. Concat DataFrames to one final DataFrames. # 3. Assign labels. # 4. Write the result to a single file, ready for ML preprocessing (data preparation). # + [markdown] id="b0XwE5j0kwF1" # ## Neighbourhood Computation # # ### 0. 
Load the data # + import os PREFIX_PATH = '/home/sramkova/diploma_thesis_data/cicids2017/attacks' # get last two dictinary names of current directory, they correspond to directory names of input data attack_dir_path = '/'.join(os.getcwd().split('/')[-2:]) PREFIX = PREFIX_PATH + '/' + attack_dir_path + '/' print(PREFIX) # + colab={"base_uri": "https://localhost:8080/"} id="XtBLVeiRhsp-" outputId="965b53d2-df1f-40af-e21d-97728113baaa" import pandas as pd import numpy as np DIR_PATH_ORIG = PREFIX + 'originated' DIR_PATH_RESP = PREFIX + 'responded' file_list_orig = [] file_list_resp = [] def get_file_names(file_list, dir_path): for filename in os.listdir(dir_path): # only IPv4: if 'f' not in filename and filename.endswith('.csv'): # (if there is an 'f' present in the name of the file, it means that the file contains # connections of a host with IPv6 address) file_list.append(filename) # load filenames to lists: get_file_names(file_list_orig, DIR_PATH_ORIG) get_file_names(file_list_resp, DIR_PATH_RESP) print(len(file_list_orig)) print(len(file_list_resp)) # + # load as dataframes to a dictionary for easier processing: # elements of the dictionary are in a form: { host.ip -> df with connections of corresponding host } dfs_orig = {} dfs_resp = {} def load_files_to_dfs(dfs_dict, file_list, dir_path, prefix): prefix_name = 'output-' + prefix for filename in file_list: file_ip = filename file_ip = file_ip.replace(prefix_name, '').replace('.csv', '') df_conns = pd.read_csv(dir_path + '/' + filename) df_conns['connection.time'] = pd.to_datetime(df_conns['connection.ts']) # missing connection.service value means that Zeek wasn't able to extract the service => nulls can # be treated as a new category df_conns['connection.service'].fillna('none', inplace = True) dfs_dict[file_ip] = df_conns load_files_to_dfs(dfs_orig, file_list_orig, DIR_PATH_ORIG, 'o-') load_files_to_dfs(dfs_resp, file_list_resp, DIR_PATH_RESP, 'r-') print(len(dfs_orig)) print(len(dfs_resp)) # + # max, min times 
to check if they correspond to available attack times (considering the time shift): o_max = dfs_orig['192.168.10.25']['connection.time'][0] o_min = dfs_orig['192.168.10.25']['connection.time'][0] for o_ip in dfs_orig: o_df = dfs_orig[o_ip] cur_max = o_df['connection.time'].max() cur_min = o_df['connection.time'].min() if cur_max > o_max: o_max = cur_max # print(o_ip) if cur_min < o_min: o_min = cur_min # print(o_ip) print(o_min) print(o_max) # - # ### 1. Compute neighbourhoods for each row based on a time interval # # (e.g. time interval: +- 5 minutes) # + # various stat functions on attributes from neighbourhood: def get_counts(df, prefix): # counts (overall + counts of different protocols): proto_tcp_count = 0 proto_udp_count = 0 proto_icmp_count = 0 if 'connection.proto' in df: proto_counts = df['connection.proto'].value_counts() proto_tcp_count = proto_counts['tcp'] if 'tcp' in proto_counts else 0 proto_udp_count = proto_counts['udp'] if 'udp' in proto_counts else 0 proto_icmp_count = proto_counts['icmp'] if 'icmp' in proto_counts else 0 return {prefix + '_total': len(df.index), prefix + '_proto_tcp_count': proto_tcp_count, prefix + '_proto_udp_count': proto_udp_count, prefix + '_proto_icmp_count': proto_icmp_count } def get_modes(df, prefix): # .mode()[0] return the value of a categorical variable that appeared the most times return {prefix + '_connection.protocol_mode': df['connection.proto'].mode()[0] if 'connection.proto' in df else '-', prefix + '_connection.service_mode': df['connection.service'].mode()[0] if 'connection.service' in df else '-', prefix + '_connection.conn_state_mode': df['connection.conn_state'].mode()[0] if 'connection.conn_state' in df else '-' } def get_means(df, prefix): # .mean() returns mean of the corresponding numerical attribute variable values return {prefix + '_connection.time_mean': df['connection.time'].mean() if 'connection.time' in df else cur_time, prefix + '_connection.duration_mean': df['connection.duration'].mean() if 
'connection.duration' in df else 0, # prefix + '_connection.orig_p_mean': df['connection.orig_p'].mean() if 'connection.orig_p' in df else 0, prefix + '_connection.orig_bytes_mean': df['connection.orig_bytes'].mean() if 'connection.orig_bytes' in df else 0, prefix + '_connection.orig_pkts_mean': df['connection.orig_pkts'].mean() if 'connection.orig_pkts' in df else 0, # prefix + '_connection.resp_p_mean': df['connection.resp_p'].mean() if 'connection.resp_p' in df else 0, prefix + '_connection.resp_bytes_mean': df['connection.resp_bytes'].mean() if 'connection.resp_bytes' in df else 0, prefix + '_connection.resp_pkts_mean': df['connection.resp_pkts'].mean() if 'connection.resp_pkts' in df else 0 } def get_stats_means(df, prefix): # .mean() returns mean of the corresponding numerical attribute variable values return {prefix + '_dns_count_mean': df['dns_count'].mean() if 'dns_count' in df else 0, prefix + '_ssh_count_mean': df['ssh_count'].mean() if 'ssh_count' in df else 0, prefix + '_http_count_mean': df['http_count'].mean() if 'http_count' in df else 0, prefix + '_ssl_count_mean': df['ssl_count'].mean() if 'ssl_count' in df else 0, prefix + '_files_count_mean': df['files_count'].mean() if 'files_count' in df else 0 } def get_medians(df, prefix): # .median() returns median of the corresponding numerical attribute variable values return {prefix + '_connection.time_median': df['connection.time'].median() if 'connection.time' in df else cur_time, prefix + '_connection.duration_median': df['connection.duration'].median() if 'connection.duration' in df else 0, # prefix + '_connection.orig_p_median': df['connection.orig_p'].median() if 'connection.orig_p' in df else 0, prefix + '_connection.orig_bytes_median': df['connection.orig_bytes'].median() if 'connection.orig_bytes' in df else 0, prefix + '_connection.orig_pkts_median': df['connection.orig_pkts'].median() if 'connection.orig_pkts' in df else 0, # prefix + '_connection.resp_p_median': 
df['connection.resp_p'].median() if 'connection.resp_p' in df else 0, prefix + '_connection.resp_bytes_median': df['connection.resp_bytes'].median() if 'connection.resp_bytes' in df else 0, prefix + '_connection.resp_pkts_median': df['connection.resp_pkts'].median() if 'connection.resp_pkts' in df else 0 } def get_orig_ports(df, prefix): # count orig_p categories: orig_well_known_count = 0 orig_reg_or_dyn_count = 0 unique_orig_p_list = df['connection.orig_p'].unique().tolist() values_orig_p = df['connection.orig_p'].value_counts() for uniq_p in unique_orig_p_list: if uniq_p < 1024: orig_well_known_count += values_orig_p[uniq_p] else: orig_reg_or_dyn_count += values_orig_p[uniq_p] return {prefix + '_orig_p_well_known_count': orig_well_known_count, prefix + '_orig_p_reg_or_dyn_count': orig_reg_or_dyn_count} def get_resp_ports(df, prefix): # count resp_p categories: common_ports = {21: 0, 22: 0, 53: 0, 80: 0, 123: 0, 443: 0, 3389: 0} resp_well_known = 0 resp_reg = 0 resp_dyn = 0 unique_resp_p_list = df['connection.resp_p'].unique().tolist() values_resp_p = df['connection.resp_p'].value_counts() for uniq_p in unique_resp_p_list: if uniq_p in common_ports.keys(): common_ports[uniq_p] += values_resp_p[uniq_p] elif uniq_p < 1024: resp_well_known += values_resp_p[uniq_p] elif uniq_p < 49152: resp_reg += values_resp_p[uniq_p] else: resp_dyn += values_resp_p[uniq_p] return {prefix + '_resp_p_21_count': common_ports[21], prefix + '_resp_p_22_count': common_ports[22], prefix + '_resp_p_53_count': common_ports[53], prefix + '_resp_p_80_count': common_ports[80], prefix + '_resp_p_123_count': common_ports[123], prefix + '_resp_p_443_count': common_ports[443], prefix + '_resp_p_3389_count': common_ports[3389], prefix + '_resp_p_well_known_count': resp_well_known, prefix + '_resp_p_reg_count': resp_reg, prefix + '_resp_p_dyn_count': resp_dyn} # + def generate_duration_filter(duration_val): # based on constants from data_exploration.ipynb if duration_val <= 0.0: return 0.000001, 
None elif duration_val <= 0.0001: return 0.000001, 0.001 elif duration_val <= 0.009: return 0.001, 0.05 elif duration_val <= 0.5: return 0.05, 1.5 elif duration_val <= 5: return 1.5, 10 elif duration_val <= 15: return 10, 20 elif duration_val <= 30: return 20, 40 elif duration_val <= 50: return 40, 60 elif duration_val <= 75: return 60, 90 elif duration_val <= 100: return 75, 110 return None, 100 def generate_bytes_filter(bytes_val): if bytes_val == 0: return 0, 0 elif bytes_val <= 1450: return bytes_val - 50, bytes_val + 50 elif bytes_val <= 35000: return bytes_val - 500, bytes_val + 500 else: return None, bytes_val - 1000 # - def get_similar_count(df, row, prefix): # protocol filter mask = (df['connection.proto'] == row['connection.proto']) df_filtered = df.loc[mask] # service filter mask = (df_filtered['connection.service'] == row['connection.service']) df_filtered = df_filtered.loc[mask] # conn_state filter mask = (df_filtered['connection.conn_state'] == row['connection.conn_state']) df_filtered = df_filtered.loc[mask] # duration filter lower, upper = generate_duration_filter(row['connection.duration']) if lower: mask = df_filtered['connection.duration'] >= lower df_filtered = df_filtered.loc[mask] if upper: mask = df_filtered['connection.duration'] <= upper df_filtered = df_filtered.loc[mask] # _bytes filter lower, upper = generate_duration_filter(row['connection.orig_bytes']) if lower: mask = df_filtered['connection.orig_bytes'] >= lower df_filtered = df_filtered.loc[mask] if upper: mask = df_filtered['connection.orig_bytes'] <= upper df_filtered = df_filtered.loc[mask] lower, upper = generate_duration_filter(row['connection.resp_bytes']) if lower: mask = df_filtered['connection.resp_bytes'] >= lower df_filtered = df_filtered.loc[mask] if upper: mask = df_filtered['connection.resp_bytes'] <= upper df_filtered = df_filtered.loc[mask] # _ip_bytes filter mask = (df_filtered['connection.orig_ip_bytes'] >= row['connection.orig_ip_bytes'] - 50) & 
(df_filtered['connection.orig_ip_bytes'] <= row['connection.orig_ip_bytes'] + 50) df_filtered = df_filtered.loc[mask] mask = (df_filtered['connection.resp_ip_bytes'] >= row['connection.resp_ip_bytes'] - 50) & (df_filtered['connection.resp_ip_bytes'] <= row['connection.resp_ip_bytes'] + 50) df_filtered = df_filtered.loc[mask] # remove original connection from neighbourhood (empty will have size 0 instead of 1) mask = (df_filtered['connection.uid'] != row['connection.uid']) df_filtered = df_filtered.loc[mask] return {prefix + '_similar_conns_count': df_filtered.shape[0]} # + def check_attr_value(x, attr_str, row_attr_vals_list): if isinstance(x, float) and np.isnan(x): return False if isinstance(x, list) and len(x) < 1: return False if isinstance(x, str) and x == '[]': return False if isinstance(row_attr_vals_list, list) and len(row_attr_vals_list) > 0: for attribute in x: if attribute in row_attr_vals_list: return True return False def get_similar_attributes_count(df, row, prefix): neighbourhood_attributes_dict = {} attributes = ['dns_qtype', 'dns_rcode', 'ssh_auth_attempts', 'ssh_host_key', 'http_method', 'http_status_code', 'http_user_agent', 'ssl_version', 'ssl_cipher', 'ssl_curve', 'ssl_validation_status', 'files_source', 'file_md5'] for attr in attributes: if not row[attr]: # attribute value list is empty, no similarity is counted attr_dict = {prefix + '_similar_' + attr + '_count': 0} neighbourhood_attributes_dict.update(attr_dict) else: # filter mask = df[attr].apply(lambda x: check_attr_value(x, attr, row[attr])) df_filtered = df.loc[mask] # remove original connection from neighbourhood (empty will have size 0 instead of 1) mask = (df_filtered['connection.uid'] != row['connection.uid']) df_filtered = df_filtered.loc[mask] # add attribute count to dictionary that contains all counts attr_dict = {prefix + '_similar_' + attr + '_count': df_filtered.shape[0]} neighbourhood_attributes_dict.update(attr_dict) return neighbourhood_attributes_dict # - def 
compute_time_neighbourhood(host_ip, dfs_list, time_col_name, cur_time, time_start, time_end, row, prefix): if host_ip in dfs_list: ip_df = dfs_list[host_ip] mask = (ip_df[time_col_name] > time_start) & (ip_df[time_col_name] <= time_end) df = ip_df.loc[mask] if len(df) > 0: neighbourhood_dict = {} neighbourhood_counts = get_counts(df, prefix) neighbourhood_modes = get_modes(df, prefix) neighbourhood_means = get_means(df, prefix) # neighbourhood_medians = get_medians(df, prefix) neighbourhood_orig_ports = get_orig_ports(df, prefix) neighbourhood_resp_ports = get_resp_ports(df, prefix) neighbourhood_stats_means = get_stats_means(df, prefix) neighbourhood_similar_count = get_similar_count(df, row, prefix) neighbourhood_similar_attributes_count = get_similar_attributes_count(df, row, prefix) neighbourhood_dict.update(neighbourhood_counts) neighbourhood_dict.update(neighbourhood_modes) neighbourhood_dict.update(neighbourhood_means) # neighbourhood_dict.update(neighbourhood_medians) neighbourhood_dict.update(neighbourhood_orig_ports) neighbourhood_dict.update(neighbourhood_resp_ports) neighbourhood_dict.update(neighbourhood_stats_means) neighbourhood_dict.update(neighbourhood_similar_count) neighbourhood_dict.update(neighbourhood_similar_attributes_count) return neighbourhood_dict return {prefix + '_total': 0, prefix + '_proto_tcp_count': 0, prefix + '_proto_udp_count': 0, prefix + '_proto_icmp_count': 0, prefix + '_connection.protocol_mode': '-', prefix + '_connection.service_mode': '-', prefix + '_connection.conn_state_mode': '-', prefix + '_connection.time_mean': cur_time, # time_mean: 0 could not be here => problem later with time conversion (missing year) # (but does it make sense as a default value?) 
prefix + '_connection.duration_mean': 0, prefix + '_connection.orig_bytes_mean': 0, prefix + '_connection.orig_pkts_mean': 0, prefix + '_connection.resp_bytes_mean': 0, prefix + '_connection.resp_pkts_mean': 0, prefix + '_orig_p_well_known_count': 0, prefix + '_orig_p_reg_or_dyn_count': 0, prefix + '_resp_p_21_count': 0, prefix + '_resp_p_22_count': 0, prefix + '_resp_p_53_count': 0, prefix + '_resp_p_80_count': 0, prefix + '_resp_p_123_count': 0, prefix + '_resp_p_443_count': 0, prefix + '_resp_p_3389_count': 0, prefix + '_resp_p_well_known_count': 0, prefix + '_resp_p_reg_count': 0, prefix + '_resp_p_dyn_count': 0, prefix + '_dns_count_mean': 0, prefix + '_ssh_count_mean': 0, prefix + '_http_count_mean': 0, prefix + '_ssl_count_mean': 0, prefix + '_files_count_mean': 0, prefix + '_similar_conns_count': 0, prefix + '_similar_dns_qtype_count': 0, prefix + '_similar_dns_rcode_count': 0, prefix + '_similar_ssh_auth_attempts_count': 0, prefix + '_similar_ssh_host_key_count': 0, prefix + '_similar_http_method_count': 0, prefix + '_similar_http_status_code_count': 0, prefix + '_similar_http_user_agent_count': 0, prefix + '_similar_ssl_version_count': 0, prefix + '_similar_ssl_cipher_count': 0, prefix + '_similar_ssl_curve_count': 0, prefix + '_similar_ssl_validation_status_count': 0, prefix + '_similar_files_source_count': 0, prefix + '_similar_file_md5_count': 0 } # + NEIGHBOURHOOD_TIME_WINDOW_MINUTES_ORIG_DIRECTION = 5 NEIGHBOURHOOD_TIME_WINDOW_MINUTES_RESP_DIRECTION = 2 def compute_neighbourhoods(cur_orig_ip, dfs_list_orig, dfs_list_resp): df_result = pd.DataFrame() print('[{}]: Computing neighbourhood for connections of originator {:15} ({})'.format(datetime.now().strftime("%H:%M:%S"), cur_orig_ip, str(len(dfs_list_orig[cur_orig_ip])))) # iterate over rows in originated connections df of host with cur_orig_ip IP address: for index, row in dfs_list_orig[cur_orig_ip].iterrows(): cur_row_dict = row.to_dict() cur_time = row['connection.time'] time_start_orig = cur_time 
- pd.Timedelta(minutes=NEIGHBOURHOOD_TIME_WINDOW_MINUTES_ORIG_DIRECTION) time_end_orig = cur_time + pd.Timedelta(minutes=NEIGHBOURHOOD_TIME_WINDOW_MINUTES_ORIG_DIRECTION) time_start_resp = cur_time - pd.Timedelta(minutes=NEIGHBOURHOOD_TIME_WINDOW_MINUTES_RESP_DIRECTION) time_end_resp = cur_time + pd.Timedelta(minutes=NEIGHBOURHOOD_TIME_WINDOW_MINUTES_RESP_DIRECTION) ip_responder = row['responded_ip'] try: # compute neighbourhoods (from originated connections for originator, from responded connections for responder): originator_neighbourhood = compute_time_neighbourhood(cur_orig_ip, dfs_list_orig, 'connection.time', cur_time, time_start_orig, time_end_orig, row, 'orig_orig') originator_neighbourhood2 = compute_time_neighbourhood(cur_orig_ip, dfs_list_resp, 'connection.time', cur_time, time_start_resp, time_end_resp, row, 'orig_resp') responder_neighbourhood = compute_time_neighbourhood(ip_responder, dfs_list_orig, 'connection.time', cur_time, time_start_orig, time_end_orig, row, 'resp_orig') responder_neighbourhood2 = compute_time_neighbourhood(ip_responder, dfs_list_resp, 'connection.time', cur_time, time_start_resp, time_end_resp, row, 'resp_resp') cur_row_dict.update(originator_neighbourhood) cur_row_dict.update(originator_neighbourhood2) cur_row_dict.update(responder_neighbourhood) cur_row_dict.update(responder_neighbourhood2) # concat to one long row and to df_result: row_df = pd.DataFrame([cur_row_dict]) df_result = pd.concat([df_result, row_df], axis=0, ignore_index=True) except: print('Problem with originator {} and responder {} ({})'.format(cur_orig_ip, ip_responder, row['connection.uid'])) pass return df_result # + from datetime import datetime import multiprocessing from multiprocessing import Pool from functools import partial from contextlib import contextmanager @contextmanager def poolcontext(*args, **kwargs): pool = multiprocessing.Pool(*args, **kwargs) yield pool pool.terminate() # compute neighbourhoods using multiple threads (time optimalization): 
print('Start at ' + datetime.now().strftime("%H:%M:%S") + '.')

# fan out per-originator work across processes; each task returns one DataFrame
with poolcontext(processes=32) as pool:
    dfs_with_neighbourhoods = pool.map(
        partial(compute_neighbourhoods, dfs_list_orig=dfs_orig, dfs_list_resp=dfs_resp),
        dfs_orig.keys())

print('Done at ' + datetime.now().strftime("%H:%M:%S") + '.')
# -

print(type(dfs_with_neighbourhoods))
print(len(dfs_with_neighbourhoods))
dfs_with_neighbourhoods[0].head()

# + [markdown] id="lYFgAiG_yjkY"
# ### 2. Concatenate to one final DataFrame

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="rjVpvSlvhtt_" outputId="0ba825ff-def0-4494-a513-8837fa99b265"
def concat_dfs(df_neighourhoods):
    """Concatenate the per-originator DataFrames into a single DataFrame.

    Rewritten around a single pd.concat: DataFrame.append was deprecated in
    pandas 1.4 and removed in 2.0, and the old append-in-a-loop was quadratic.
    Original indexes are kept, as before; an empty input yields an empty
    DataFrame.
    """
    if len(df_neighourhoods) == 0:
        return pd.DataFrame()
    return pd.concat(list(df_neighourhoods), axis=0)


df_result = concat_dfs(dfs_with_neighbourhoods)
# -

df_result

# +
# backup:
from datetime import date
df_result.to_csv(PREFIX + 'query_output_preprocessing_checkpoint_' + date.today().strftime("%d_%m") + '.csv', index=False, header=True)

# from datetime import datetime
# import pandas as pd
# df_result = pd.read_csv(PREFIX + 'query_output_preprocessing_checkpoint_' + date.today().strftime("%d_%m") + '.csv')
# df_result['connection.time'] = pd.to_datetime(df_result['connection.time'])
# -

df_result

# ### 3. Assign attacker labels

# + id="kS-tCgfhhtY9"
# default: a connection involves neither the attacker nor the victim
df_result['attacker_label'] = 'No'
df_result['victim_label'] = 'No'

# +
# assign labels to input data as:
# 'No' - not from/ to attacker
# 'Yes' - originated from/ responded to attacker
df_result.loc[df_result['responded_ip'] == '172.16.0.1', 'attacker_label'] = 'Yes'
df_result.loc[df_result['originated_ip'] == '172.16.0.1', 'attacker_label'] = 'Yes'
# -

df_result

df_result['attacker_label'].value_counts()

# +
# assign labels to input data as:
# 'No' - not from/ to victim
# 'Yes' - originated from/ responded to victim
df_result.loc[df_result['responded_ip'] == '192.168.10.50', 'victim_label'] = 'Yes'
df_result.loc[df_result['originated_ip'] == '192.168.10.50', 'victim_label'] = 'Yes'
# -

df_result['victim_label'].value_counts()

# ### 4. Write to file

print(len(df_result))

df_result.to_csv(PREFIX + 'query_output_processing.csv', index=False, header=True)
impl/jupyter_notebooks/cicids2017_attacks/2_thursday/web_attack_brute_force/query_output_preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Learning Machine Learning # # *This worksheet was originally designed by [<NAME>](https://www.math.ucla.edu/~egeo/) (Department of Mathematics, UCLA). It has been subsequently revised by later TAs and instructors.* # # Today we're going to take the time to review the basic concepts for machine learning models. Some of you might have seen this in PIC16A or somewhere else, some of you might have not. That's okay! We're going to go through things from the top and fill in the details you might be missing. Throughout this worksheet, I'll be introducing a lot of vocabulary that might be new to you. These will be written in **bold**. Don't worry about memorizing them, we're not going to ask you to repeat the definitions on an exam! However, by the end of the course, you should be comfortable using this vocabulary if you aren't already. # # Let's first discuss the basic setting of machine learning. The basic idea is that there is some pattern that exists in our dataset that we want our computer to learn. We will do this by creating a **model**. There are two main types of problems: # # 1. We want our program to learn *groups* in the data. For example, our dataset could be "images that contain traffic lights" and "images that do not contain traffic lights" (hi Google!). We want our model to be able to take new data and identify what group it belongs. In our example, we would want to be able to supply our program with a new image and have it tell us whether or not contains a traffic light. This type of problem is often called **classification** and the groups are often called **classes**. # 2. We want our program to learn *values* in the data. The values here should not just be integers, but also include decimals (i.e., real numbers). 
We want our model to be able to predict what this value would be when it sees new data. For example, our data could be a list of attributes (age, weight, etc.) about an animal and we are concerned about its height. In this example, we would want to be able to take to get information about a new animal and be able to guess its height. This problem is often called **regression**, although I think that term is a little less common than "classification" is. If you are familiar with mathematical terminology, this is often described as there being some function on the data and we want to learn how to evaluate the function. # # The distinction between these two problems is important, because the types of models that we can use to solve them are completely different. Fortunately, the general process to set up an accurate model is more or less the same. The outline for this process is this: # # 1. We prepare the data. This is called **pre-processing**. # 2. We prepare the model. There isn't really a name for this process, but you might hear **building** to refer to this. # 3. We give data to the model for it to learn from. This is called **training**. # 4. We see how well the model performs on new data. This is called **testing** or evaluation. # # The way models work is that they are predicting some piece of information (the value or class above) from given information in our dataset. The information we are trying to predict is called the **labels** and the information we are given is called the **features**. In training, the model has access to the labels and the features and attempts to learn how to predict the labels from the features. In testing, the model only has access to the features and comes up with labels on its own. We then see how accurate the predicted labels are compared to the true labels to evaluate how good our model is. # # Now, let's go with an example problem to go through these steps. 
We will be using the [MNIST](https://en.wikipedia.org/wiki/MNIST_database) dataset. # # Before we start pre-processing, it's always a good idea to look at the dataset we're using. This will help us understand how the data is set up and what problem we want to be solving. # + import tensorflow as tf (x_train, y_train), (x_test, y_test) = \ tf.keras.datasets.mnist.load_data() # - # What is this dataset? When we look at the wikipedia article for MNIST, we see that it is a dataset of *images*. Let's take a look at how it's stored. We get four different NumPy arrays when we load MNIST. For now, let's just worry about `x_train`. The others will be explained later. x_train.shape # The `x_train` variable is a 3D NumPy array of size 60000 x 28 x 28. In a machine learning dataset, the convention is that all information that share the same first index represents the same "point" in the dataset. As an example, here it means that the NumPy array `x_train[0,:,;]` represents the first image in the dataset. Let's take a look at it. import matplotlib.pyplot as plt plt.imshow(x_train[0,:,:], cmap='binary') plt.show() # We get an image that looks like the number 5. Great! The MNIST dataset contains images that all depict one digit between 0 and 9. Our goal is to figure out what digit each image shows. What type of problem is this? # # Well, it might sound like it's a regression problem, since we are trying to get a number. However, regression problems are supposed to be *continuous*, which means that we want our valid answers to be *any* real number, at least within a certain range. Here, we want our numbers to be between 0 and 9, but an answer like "4.5" is not going to be a sensible answer. Since there are only ten different digits it could be, this is actually a classificaiton problem. # # Now that we've identified our problem, let's continue. We want to do one more thing, which is set the random seed for NumPy. 
# This is so that if we run our code more than once, we will get the same results.

# +
import numpy as np

np.random.seed(184)
# -

# ## Pre-processing
#
# In pre-processing, we prepare our data so that our model can use it. There are a lot of things we might want to do during pre-processing, but the two most important are to *clean* and *split* the data, which should also be done first.
#
# The very first thing we should do is **clean** the data, which is to fix wrong and missing data in it. In our data, we don't need to clean anything. But it's always good to check first. If there is any value in your dataset that is obviously wrong, remove it. Usually these are absurd numbers, such as a person's height being recorded as 60 feet or as -1 feet. Sometimes this will be a mislabeled categorical variable, such as a penguin's sex being recorded as "." (as some might remember from PIC16A). There will also likely be missing data, which might be recorded in the data as None or NaN. Sometimes a blank spot is recorded as an empty string or as -1, but only if these aren't also valid values for that feature to be.
#
# If there's a missing or incorrect value in the data, there are three options to deal with it:
# 1. Leave it blank. Some models can deal with missing data. Most can't, so this is rarely an option. But if our model can, then all we need to do to clean your data is blank out the incorrect values.
# 2. Remove the entire row (i.e., the entire data point). This is usually the correct choice.
# 3. Remove the entire column (i.e., the entire feature). This is what we should do if a feature has a lot of missing values. Generally, we have more points than features, so removing a feature hurts more than removing a point. However, it's usually not worth cutting the number of points you can use in half just to keep one extra feature.
# Generally we call the data containing the features `x` and the data containing the labels `y`. We can see that this is already done in our dataset, but in general we'd have to do this ourselves. This is usually done by just getting a column from the dataset as the labels and removing it from the dataset to get the features.
#
# Next, we want to split the data into either two or three sets, depending. These are:
# 1. The **training set**. This will contain most of the data and is the data the model will learn from.
# 2. The **test set**. This is what we will use to evaluate the final accuracy of the model after it's finished. (You might hear this be called a **hold-out set**, it's the same thing.)
# 3. Sometimes we use a **validation set**. Like a test set, this is used to evaluate the accuracy of the model after it's been trained.
#
# We split our data to protect against **overfitting**. An easy way to evaluate the model is to see how well it can generate labels on the data it was trained on. However, the model could have learned *any* way to tell the labels from the features. It could have learned the pattern we were looking for or it could have learned some other, likely more subtle and spurious, pattern. For example, maybe in our dataset for MNIST all the "5" images have a dark pixel in exactly one spot. The model learns this and gets very good accuracy, but this is not a useful way to determine a number in a general setting. Splitting the data allows us to see how well a model performs on data it didn't see during training.
#
# So why would we want two sets? Well, if we're making a model, we want it to do well on the test set. If we make a model that does poorly on the test set, we're going to change our model a bit so it does better. We keep doing this until the model does well on the test set, and then we're satisfied. This sounds great, but unfortunately, can result in the model "learning" from the test set in a very subtle way.
# To prevent this, we test our intermediate models on a validation set (which we don't have to keep the same for each new model) until we are satisfied. Then we get a final accuracy on the test set, which we have never used before.
#
# As an alternative to creating an explicit validation set, we can do **cross-validation**. This is when we create multiple train/validation set pairs from the training set and compare the model on all of these pairs. Scikit-learn provides a method to do this for its models.
#
# (There is an issue in terminology: sometimes "validation set" and "test set" get switched and mean the opposite of above. We won't do that in this course, though. Also, sometimes you might hear **development set** (**dev set**), which only ever means what we call a validation set.)
#
# We can use the Scikit-learn method `train_test_split` to create the test (and validation) set. Our data already has a train/test split, but let's make a train/validation split in it as well. This method also shuffles the data by default, which is useful.

# +
from sklearn.model_selection import train_test_split

x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train)
# -

# The size of your test set and validation set can vary. By default, `train_test_split` makes this 25% of your data, which is fine. We can see that our data already has a test set that is 1/7 of the entire dataset, which is about 14%. This is fine too. Some proportion close to these is good: there's no real rule about what it has to be. I like to do 20%, myself.
#
# You can split the data into features/labels and train/test in either order. If you split into features/labels first, make sure your train/test split is the same for both parts. We want a row in `x_train` to correspond to the row in `y_train` with the same index. Don't do this:

# incorrect!
x_train, x_test = train_test_split(x) # x here is our features y_train, y_test = train_test_split(y) # y is our labels # That makes the test set different for the labels and the features, which will destroy the connection between them. # # (One last note on splitting: if you have classes in your data (either in the labels or the features), you might want to split to make sure every class appears in the training set. There are methods to do this. This is mostly an issue if one of the classes is particularly rare. In our case, it's very unlikely that a class is going to be missing in the training set, so it's okay.) # Beyond splitting, there's other things we might want to do to preprocess the data. In our data, we can see that each "row" of the data actually has two dimensions. This is useful in some models, but not for any of the models we know about now. We're going to reshape the data so that each row just has 784 features. x_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2]) x_valid = x_valid.reshape(x_valid.shape[0], x_valid.shape[1]*x_valid.shape[2]) x_test = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2]) # Two of the most common pre-processing techniques are to **standardize** quantitative features (numbers) and to **encode** categorical features (classes, descriptions). # # When we standardize features, we modify them so they have mean 0 and standard deviation 1 in the training set. We can think of this as just changing the units on the features. This prevents the model from thinking one feature is more significant than another just because the numbers are larger (this actually happens!). # + from sklearn.preprocessing import StandardScaler scaler = StandardScaler().fit(x_train) x_train = scaler.transform(x_train) x_valid = scaler.transform(x_valid) x_test = scaler.transform(x_test) # - # We don't have any categorical features, so we don't have anything to encode. 
# If we did, we would want to use Scikit-learn's `LabelEncoder` or `OneHotEncoder`. These turn the categorical features into numbers so that models can understand them. They work in different ways and might have different impacts on your model. Try them both if you ever need to use categorical features!

# ## Building the Model

# Now, we build the model. There's very little to do here, especially if you use Scikit-learn! All we need to do is load the model. For example, let's do a variant of SVM (Support Vector Machine) model.

# +
from sklearn.linear_model import SGDClassifier

model = SGDClassifier(max_iter = 10)
# -

# All done!
#
# Here's when we would input and toy with the various attributes of your model that we don't train (the so-called **hyperparameters**). These are going to be the parameters of the model's initializer, and include things such as "complexity" and learning rate. Every model has different hyperparameters, and changing these can give us better or worse results. We can try a bunch of different hyperparameters and take the model that does the best on the validation set.
#
# Here we set the `max_iter` hyperparameter to 10. This makes our model worse, but makes it train faster.

# ## Training the Model
#
# After we've built the model, we want to train it. All Scikit-learn models are trained the same way.

model.fit(x_train, y_train)

# Our model gives us an error when we train it. This is because we didn't allow the model to run long enough for it to converge, which means it never stabilized. This is a problem, but not a huge one. We can fix this by increasing the `max_iter` hyperparameter, but then the training would take longer than we have right now.

# ## Testing the Model

# Lastly, we test the model. The first thing we should do is see how the model performs on the training set. This number is not as important as the accuracy on the validation or test sets, but it's a nice measure to have.
# In particular, if our accuracy on the validation set is not so good, the accuracy on the training set lets us know what's wrong with the model. If the accuracy on the training set is high, this means our model was overfitting on the training set. We should fix this by reducing the complexity of the model or the length of time it was being trained. If the accuracy on the training set is low, this means our model failed to learn the pattern it was supposed to. We should fix this by increasing the complexity of the model or the length of time it was being trained.

sum(y_train == model.predict(x_train))/len(x_train)

# The accuracy is above 90%, which is pretty good! We could do better with a different model, but this is a good first start. Now we look at the validation set. Our hope is the accuracy is pretty much the same. If it's less, that means we overfitted the model on our data. If it's more... that's weird, but not really a problem.

sum(y_valid == model.predict(x_valid))/len(x_valid)

# The accuracy dropped a little bit. This is to be expected. However, it didn't drop a whole lot, so that's great! It looks like we didn't overfit our model, and it generalized well to new data. Because of this, we have no reason to go back and change our model, so we can go ahead and evaluate it on our test set to get our final accuracy.

sum(y_test == model.predict(x_test))/len(x_test)

# Again, a little worse than the training set accuracy, but still above 90%. At this point, since we already evaluated it on our test set, we shouldn't go back and make changes to our model.
discussion/ml_basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="https://raw.githubusercontent.com/Qiskit/qiskit-tutorials/master/images/qiskit-heading.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # # Repetition codes on real devices # # * Requires qiskit-ignis 0.2.0 or greater. # # ### Contributors # # <NAME>. Wootton, IBM Research # In [another notebook](../../qiskit/ignis/repetition_code.ipynb) we saw how to use the `RepetitionCode` and `GraphDecoder` tools from Ignis. Here we'll specifcially look at how to use them with real hardware. from qiskit.ignis.verification.topological_codes import RepetitionCode from qiskit.ignis.verification.topological_codes import GraphDecoder from qiskit.ignis.verification.topological_codes import lookuptable_decoding, postselection_decoding # Let's start off easy, with a `d=3`, `T=1` repetition code. d = 3 T = 1 code = RepetitionCode(d,T) # The circuits for this are found in `code.circuit['0']` (for an encoded `0`) and code.circuit['1'] (for an encoded `1`). We can use `count_ops` to see which operations these contain. code.circuit['0'].count_ops() code.circuit['1'].count_ops() # Both contain 4 `cx` gates, which is exactly what we'd expect from this code. # # Now let's set up a real device as a backend, and compile these circuits for it. # + from qiskit import IBMQ from qiskit.compiler import transpile from qiskit.transpiler import PassManager provider = IBMQ.load_account() backend = provider.get_backend('ibmq_16_melbourne') qc0 = transpile(code.circuit['0'], backend=backend) qc1 = transpile(code.circuit['1'], backend=backend) # - # Let's see what has happened to the gates they contain. 
print('gates for encoded 0 = ', qc0.count_ops())
print('gates for encoded 1 = ', qc1.count_ops())

# The single qubit gates are now all `u2`s and `u3`s, as usually happens when we compile to real hardware. But the number of `cx` gates has increased! This implies some remapping has occurred, which is not what we want for error correction. To avoid this, we first need to take a look at the coupling map of the device.

coupling_map = backend.configuration().coupling_map
print(coupling_map)

# We can think of the qubits of a repetition code as sitting along a line (alternating between 'code' and 'link' qubits), with each qubit interacting only with its neighbours. So we need to look at the coupling map and find a line that covers as many qubits as we can. One possibility is

line = [0,1,2,3,4,5,6,8,9,10,11,12,13]

# With this we can set up an `initial_layout` dictionary, which tells us exactly which qubit in the circuit corresponds to which qubits on the device.

# +
def get_initial_layout(code,line):
    """Map the qubits of `code` onto the device qubits listed in `line`.

    Code qubits go on the even positions of the line, link qubits on the odd
    positions in between. The code distance is derived from `code` itself
    instead of relying on the notebook-global `d`, so the function stays
    correct even if the global variable has since changed.
    """
    d = len(code.code_qubit)
    initial_layout = {}
    for j in range(d):
        initial_layout[code.code_qubit[j]] = line[2*j]
    for j in range(d-1):
        initial_layout[code.link_qubit[j]] = line[2*j+1]
    return initial_layout


initial_layout = get_initial_layout(code,line)
print(initial_layout)
# -

# Now let's compile using this.

# +
qc0 = transpile(code.circuit['0'], backend=backend, initial_layout=initial_layout)
qc1 = transpile(code.circuit['1'], backend=backend, initial_layout=initial_layout)

print('gates for d = '+str(d)+' with encoded 0:', qc0.count_ops())
print('gates for d = '+str(d)+' with encoded 1:', qc1.count_ops())
# -

# The number of `cx` gates is now as it should be.
#
# Before running, we need to remove the reset gates. For `T=1`, they don't actually do anything anyway. But since they aren't currently supported on hardware, they may cause us trouble.
def remove_reset(qc):
    """Remove all `reset` instructions from circuit `qc` (in place) and return it.

    For a T=1 repetition code the resets do nothing, and (per the note in
    the notebook text) reset was not supported on hardware at the time.
    """
    # Each entry of qc.data is (instruction, qargs, cargs); keep only the
    # entries whose instruction is not a reset.
    qc.data = [ gate for gate in qc.data if gate[0].name!='reset' ]
    return qc

# NOTE(review): qc0/qc1 are the transpiled encoded-0/encoded-1 circuits
# produced in the earlier cells of this notebook.
qc0 = remove_reset(qc0)
qc1 = remove_reset(qc1)

# Now we can actually run the circuits. So let's set up a function to do this for us.

# +
from qiskit import execute, Aer
from qiskit.providers.aer import noise

def get_syndrome(circuits,backend,sim=False,shots=8192):
    """Execute `circuits` and return the processed syndrome results.

    circuits: list of transpiled circuits, one per encoded logical value
              (named '0' and '1' — `get_counts` looks them up by name).
    backend:  real device; when sim=True it is only used as the source of
              calibration data for the simulator's noise model.
    sim:      True  -> run on the Aer qasm simulator with a device noise model,
              False -> run on the real backend.
    shots:    number of shots per circuit.

    NOTE(review): depends on the notebook-global `code` (the current
    RepetitionCode) for `process_results` — confirm `code` matches the
    circuits being run.
    """
    if sim:
        # Build a noise model from the device's reported error rates.
        noise_model = noise.device.basic_device_noise_model(backend.properties())
        job = execute( circuits, Aer.get_backend('qasm_simulator'), noise_model=noise_model, shots=shots )
    else:
        job = execute( circuits, backend, shots=shots )
    raw_results = {}
    for log in ['0','1']:
        raw_results[log] = job.result().get_counts(log)
    # Convert raw counts into the syndrome format the decoders expect.
    return code.process_results( raw_results )
# -

# This has a `sim` argument, with which we can choose whether to actually use the real device, or just use the noise model we get from the device in a simulation.
#
# Let's just simulate for now.

sim = True

results = get_syndrome([qc0,qc1],backend,sim=sim)
print(results)

# And we can decode the results.

# +
# Compare three decoding strategies on the same syndrome data.
dec = GraphDecoder(code)

logical_prob_match = dec.get_logical_prob(results)
# NOTE(review): passing `results` as both the training and the test set
# over-fits the lookup table; the notebook's closing remarks acknowledge this.
logical_prob_lookup = lookuptable_decoding(results,results)
logical_prob_post = postselection_decoding(results)

for log in ['0','1']:
    print('d =',d,',log =',log)
    print('logical error probability for matching =',logical_prob_match[log])
    print('logical error probability for lookup table =',logical_prob_lookup[log])
    print('logical error probability for postselection =',logical_prob_post[log])
    print('')
# -

# Now let's see what happens when we look at different code sizes.
# Sweep code distances d = 3..7: for each distance, build a fresh repetition
# code, transpile with the fixed qubit `line` (so no cx remapping occurs),
# strip resets, run, and compare all three decoders. Reuses the helpers
# get_initial_layout / remove_reset / get_syndrome defined earlier.
for d in range(3,8):
    code = RepetitionCode(d,1)  # T=1 syndrome round, as before
    initial_layout = get_initial_layout(code,line)
    circuits = [ transpile(code.circuit[log], backend=backend, initial_layout=initial_layout) for log in ['0','1'] ]
    circuits = [ remove_reset(qc) for qc in circuits ]
    print('gates for d = '+str(d)+' with encoded 0:', circuits[0].count_ops(), '\n')
    results = get_syndrome(circuits,backend,sim=sim)
    dec = GraphDecoder(code)
    logical_prob_match = dec.get_logical_prob(results)
    # NOTE(review): same data used to train and test the lookup table —
    # over-fits, as the closing remarks below point out.
    logical_prob_lookup = lookuptable_decoding(results,results)
    logical_prob_post = postselection_decoding(results)
    for log in ['0','1']:
        print('d =',d,',log =',log)
        print('logical error probability for matching =',logical_prob_match[log])
        print('logical error probability for lookup table =',logical_prob_lookup[log])
        print('logical error probability for postselection =',logical_prob_post[log])
        print('')
    print('')

# If we look only at odd `d`, here we see a steady decrease in the logical error probability, as expected. A decrease is not seen between each odd $d$ and the following $d+1$ for the matching decoder. This is due to the fact that the number of errors required to overturn a clear majority is the same in both cases.

# Note that all the jobs above could have been sent in a single batch, which speeds things up for real devices. Also, two separate sets of results for each code should really be obtained and used in `lookuptable_decoding` to prevent over-fitting. These things were not done to keep this a simpler and clearer tutorial.

# Index metadata for the tutorial site's keyword search.
keywords = {'Topics': ['Ignis', 'Quantum error correction'], 'Commands': ['`RepetitionCode`', '`GraphDecoder`', '`transpile`', '`initial_layout`']}
ignis/repetition_code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `NumPy`: векторы и операции над ними (Версия для Python 3) # --- # В этом ноутбуке нам понадобятся библиотека `NumPy`. Для удобства импортируем ее под более коротким именем: import numpy as np # ## 1. Создание векторов # Самый простой способ создать вектор в `NumPy` — задать его явно с помощью __`numpy.array(list, dtype=None, ...)`__. # # Параметр __`list`__ задает итерируемый объект, из которого можно создать вектор.Например, в качестве этого параметра можно задать список чисел. Параметр __`dtype`__ задает тип значений вектора, например, __`float`__ — для вещественных значений и __`int`__ — для целочисленных. Если этот параметр не задан, то тип данных будет определен из типа элементов первого аргумента. a = np.array([1, 2, 3, 4]) print('Вектор:\n', a) b = np.array([1, 2, 3, 4, 5], dtype=float) print('Вещественный вектор:\n', b) c = np.array([True, False, True], dtype=bool) print('Булевский вектор:\n', c) # Тип значений вектора можно узнать с помощью __`numpy.ndarray.dtype`__: print('Тип булевского вектора:\n', c.dtype) # Другим способом задания вектора является функция __`numpy.arange(([start, ]stop, [step, ]...)`__, которая задает последовательность чисел заданного типа из промежутка __[`start`, `stop`)__ через шаг __`step`__: d = np.arange(start=10, stop=20, step=2) # последнее значение не включается! 
print('Вектор чисел от 10 до 20 с шагом 2:\n', d) f = np.arange(start=0, stop=1, step=0.3, dtype=float) print('Вещественный вектор чисел от 0 до 1 с шагом 0.3:\n', f) # По сути вектор в `NumPy` является одномерным массивом, что соответствует интуитивному определению вектора: print(c.ndim) # количество размерностей print(c.shape) # shape фактически задает длину вектора # __Обратите внимание:__ _вектор _и одномерный массив тождественные понятия в `NumPy`. Помимо этого, также существуют понятия _вектор-столбец_ и _вектор-строка_, которые, несмотря на то что математически задают один и тот же объект, являются двумерными массивами и имеют другое значение поля __`shape`__ (в этом случае поле состоит из двух чисел, одно из которых равно единице). Эти тонкости будут рассмотрены в следующем уроке. # Более подробно о том, как создавать векторы в `NumPy`, # см. [документацию](http://docs.scipy.org/doc/numpy-1.10.1/user/basics.creation.html). # ## 2. Операции над векторами # Векторы в `NumPy` можно складывать, вычитать, умножать на число и умножать на другой вектор (покоординатно): # + a = np.array([1, 2, 3]) b = np.array([6, 5, 4]) k = 2 print('Вектор a:', a) print('Вектор b:', b) print('Число k:', k) # - print('Сумма a и b:\n', a + b) print('Разность a и b:\n', a - b) print('Покоординатное умножение a и b:\n', a * b ) print('Умножение вектора на число (осуществляется покоординатно):\n', k * a) # ## 3. Нормы векторов # Вспомним некоторые нормы, которые можно ввести в пространстве $\mathbb{R}^{n}$, и рассмотрим, с помощью каких библиотек и функций их можно вычислять в `NumPy`. # ### p-норма # p-норма (норма Гёльдера) для вектора $x = (x_{1}, \dots, x_{n}) \in \mathbb{R}^{n}$ вычисляется по формуле: # # $$ # \left\Vert x \right\Vert_{p} = \left( \sum_{i=1}^n \left| x_{i} \right|^{p} \right)^{1 / p},~p \geq 1. 
# $$ # В частных случаях при: # * $p = 1$ получаем $\ell_{1}$ норму # * $p = 2$ получаем $\ell_{2}$ норму # Далее нам понабится модуль `numpy.linalg`, реализующий некоторые приложения линейной алгебры. Для вычисления различных норм мы используем функцию __`numpy.linalg.norm(x, ord=None, ...)`__, где __`x`__ — исходный вектор, __`ord`__ — параметр, определяющий норму (мы рассмотрим два варианта его значений — 1 и 2). Импортируем эту функцию: from numpy.linalg import norm # ### $\ell_{1}$ норма # $\ell_{1}$ норма # (также известная как [манхэттенское расстояние](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%B3%D0%BE%D1%80%D0%BE%D0%B4%D1%81%D0%BA%D0%B8%D1%85_%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB%D0%BE%D0%B2)) # для вектора $x = (x_{1}, \dots, x_{n}) \in \mathbb{R}^{n}$ вычисляется по формуле: # # $$ # \left\Vert x \right\Vert_{1} = \sum_{i=1}^n \left| x_{i} \right|. # $$ # Ей в функции __`numpy.linalg.norm(x, ord=None, ...)`__ соответствует параметр __`ord=1`__. a = np.array([1, 2, -3]) print('Вектор a:', a) print('L1 норма вектора a:\n', norm(a, ord=1)) # ### $\ell_{2}$ норма # $\ell_{2}$ норма (также известная как евклидова норма) # для вектора $x = (x_{1}, \dots, x_{n}) \in \mathbb{R}^{n}$ вычисляется по формуле: # # $$ # \left\Vert x \right\Vert_{2} = \sqrt{\sum_{i=1}^n \left( x_{i} \right)^2}. # $$ # Ей в функции __`numpy.linalg.norm(x, ord=None, ...)`__ соответствует параметр __`ord=2`__. a = np.array([1, 2, -3]) print('Вектор a:', a) print('L2 норма вектора a:\n', norm(a, ord=2)) # Более подробно о том, какие еще нормы (в том числе матричные) можно вычислить, см. [документацию](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.linalg.norm.html). # ## 4. 
Расстояния между векторами # Для двух векторов $x = (x_{1}, \dots, x_{n}) \in \mathbb{R}^{n}$ и $y = (y_{1}, \dots, y_{n}) \in \mathbb{R}^{n}$ $\ell_{1}$ и $\ell_{2}$ раccтояния вычисляются по следующим формулам соответственно: # # $$ # \rho_{1}\left( x, y \right) = \left\Vert x - y \right\Vert_{1} = \sum_{i=1}^n \left| x_{i} - y_{i} \right| # $$ # # $$ # \rho_{2}\left( x, y \right) = \left\Vert x - y \right\Vert_{2} = # \sqrt{\sum_{i=1}^n \left( x_{i} - y_{i} \right)^2}. # $$ a = np.array([1, 2, -3]) b = np.array([-4, 3, 8]) print('Вектор a:', a) print('Вектор b:', b) print('L1 расстояние между векторами a и b:\n', norm(a - b, ord=1)) print('L2 расстояние между векторами a и b:\n', norm(a - b, ord=2)) # Также расстояние между векторами можно посчитать с помощью функции __`scipy.spatial.distance.cdist(XA, XB, metric='euclidean', p=2, ...)`__ из модуля `SciPy`, предназначенного для выполнения научных и инженерных расчётов. from scipy.spatial.distance import cdist # __`scipy.spatial.distance.cdist(...)`__ требует, чтобы размерность __`XA`__ и __`XB`__ была как минимум двумерная. По этой причине для использования этой функции необходимо преобразовать _векторы_, которые мы рассматриваем в этом ноутбуке, к _вектор-строкам_ с помощью способов, которые мы рассмотрим ниже. # Параметры __`XA, XB`__ — исходные вектор-строки, а __`metric`__ и __`p`__ задают метрику расстояния # (более подробно о том, какие метрики можно использовать, см. [документацию](http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.spatial.distance.cdist.html)). # Первый способ из _вектора_ сделать _вектор-строку (вектор-столбец)_ — это использовать _метод_ __`array.reshape(shape)`__, где параметр __`shape`__ задает размерность вектора (кортеж чисел). 
a = np.array([6, 3, -5]) b = np.array([-1, 0, 7]) print('Вектор a:', a) print('Его размерность:', a.shape) print('Вектор b:', b) print('Его размерность:', b.shape) a = a.reshape((1, 3)) b = b.reshape((1, 3)) print('После применения метода reshape:\n') print('Вектор-строка a:', a) print('Его размерность:', a.shape) print('Вектор-строка b:', b) print('Его размерность:', b.shape) print('Манхэттенское расстояние между a и b (через cdist):', cdist(a, b, metric='cityblock')) # Заметим, что после применения этого метода размерность полученных вектор-строк будет равна __`shape`__. Следующий метод позволяет сделать такое же преобразование, но не изменяет размерность исходного вектора. # В `NumPy` к размерностям объектов можно добавлять фиктивные оси с помощью __`np.newaxis`__. Для того, чтобы понять, как это сделать, рассмотрим пример: d = np.array([3, 0, 8, 9, -10]) print('Вектор d:', d) print('Его размерность:', d.shape) # + print ('Вектор d с newaxis --> вектор-строка:\n', d[np.newaxis, :]) print ('Полученная размерность:', d[np.newaxis, :].shape) print ('Вектор d с newaxis --> вектор-столбец:\n', d[:, np.newaxis]) print ('Полученная размерность:', d[:, np.newaxis].shape) # - # Важно, что __`np.newaxis`__ добавляет к размерности ось, длина которой равна 1 (это и логично, так как количество элементов должно сохраняться). Таким образом, надо вставлять новую ось там, где нужна единица в размерности. # Теперь посчитаем расстояния с помощью __`scipy.spatial.distance.cdist(...)`__, используя __`np.newaxis`__ для преобразования векторов: a = np.array([6, 3, -5]) b = np.array([-1, 0, 7]) print ('Евклидово расстояние между a и b (через cdist):', cdist(a[np.newaxis, :], b[np.newaxis, :], metric='euclidean')) # Эта функция также позволяет вычислять попарные расстояния между множествами векторов. Например, пусть у нас имеется матрица размера $m_{A} \times n$. Мы можем рассматривать ее как описание некоторых $m_{A}$ наблюдений в $n$-мерном пространстве. 
Пусть также имеется еще одна аналогичная матрица размера $m_{B} \times n$, где $m_{B}$ векторов в том же $n$-мерном пространстве. Часто необходимо посчитать попарные расстояния между векторами первого и второго множеств. В этом случае можно пользоваться функцией __`scipy.spatial.distance.cdist(XA, XB, metric='euclidean', p=2, ...)`__, где в качестве __`XA, XB`__ необходимо передать две описанные матрицы. Функция возвращает матрицу попарных расстояний размера $m_{A} \times m_{B}$, где элемент матрицы на $[i, j]$-ой позиции равен расстоянию между $i$-тым вектором первого множества и $j$-ым вектором второго множества. # # В данном случае эта функция предподчительнее __`numpy.linalg.norm(...)`__, так как она вычисляет попарные расстояния быстрее и эффективнее. # ## 5. Скалярное произведение и угол между векторами a = np.array([0, 5, -1]) b = np.array([-4, 9, 3]) print('Вектор a:', a) print('Вектор b:', b) # Скалярное произведение в пространстве $\mathbb{R}^{n}$ для двух векторов $x = (x_{1}, \dots, x_{n})$ и $y = (y_{1}, \dots, y_{n})$ определяется как: # # $$ # \langle x, y \rangle = \sum_{i=1}^n x_{i} y_{i}. # $$ # Скалярное произведение двух векторов можно вычислять с помощью функции __`numpy.dot(a, b, ...)`__ или _метода_ __`vec1.dot(vec2)`__, где __`vec1`__ и __`vec2`__ — исходные векторы. Также эти функции подходят для матричного умножения, о котором речь пойдет в следующем уроке. print('Скалярное произведение a и b (через функцию):', np.dot(a, b)) print('Скалярное произведение a и b (через метод):', a.dot(b)) # Длиной вектора $x = (x_{1}, \dots, x_{n}) \in \mathbb{R}^{n}$ называется квадратный корень из скалярного произведения, то есть длина равна евклидовой норме вектора: # # $$ # \left| x \right| = \sqrt{\langle x, x \rangle} = \sqrt{\sum_{i=1}^n x_{i}^2} = \left\Vert x \right\Vert_{2}. 
# $$ # Теперь, когда мы знаем расстояние между двумя ненулевыми векторами и их длины, мы можем вычислить угол между ними через скалярное произведение: # # $$ # \langle x, y \rangle = \left| x \right| | y | \cos(\alpha) # \implies \cos(\alpha) = \frac{\langle x, y \rangle}{\left|| x |\right| || y ||}, # $$ # # где $\alpha \in [0, \pi]$ — угол между векторами $x$ и $y$. cos_angle = np.dot(a, b) / norm(a) / norm(b) print('Косинус угла между a и b:', cos_angle) print('Сам угол:', np.arccos(cos_angle)) # Более подробно о том, как вычислять скалярное произведение в `NumPy`, # см. [документацию](http://docs.scipy.org/doc/numpy/reference/routines.linalg.html#matrix-and-vector-products).
notebook/vector_operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # # Fuzzingbook Release Notes # # This book comes with version numbers; these correspond to the version numbers in [the Python pip package](Importing.ipynb). # + [markdown] slideshow={"slide_type": "slide"} # ## Version 1.0 (in progress) # # * We now support (but also require) Python 3.9 or later. Earlier versions still required Python 3.6 due to some outdated modules such as `astor` and `enforce` we depended upon (and now don't anymore). # * We added missing dependencies to the `fuzzingbook` pip package (Issue [#44](https://github.com/uds-se/debuggingbook/issues/44) in `debuggingbook`) such that `pip install fuzzingbook` also installs all the packages it depends upon. Thanks to @TheSilvus for reporting this! # * We fixed a warning '.gitignore is a symbolic link' during git checkout ([Issue #43](https://github.com/uds-se/debuggingbook/issues/43)) Thanks to @rjc for reporting this! # + [markdown] slideshow={"slide_type": "slide"} # ## Version 0.9.5 (released 2021-06-08) # # * Lots of minor fixes in HTML generation, adopting new tools and tests from [Debuggingbook](https://www.debuggingbook.org). # * Code functionality should be unchanged. # * The `bookutils` module is now shared with the `debuggingbook` project; some (hopefully neutral) fixes. # * Several typos and other minor fixes throughout the book. # + [markdown] slideshow={"slide_type": "slide"} # ## Version 0.9.0 # # * In the Web version, some not-so-critical details (typically, long implementations and logs) are only shown on demand. This is still work in progress. # * The `fuzzingbook_utils` module used by notebooks is now renamed to `bookutils`. 
Code and notebooks using `fuzzingbook_utils` may still work, but will issue a deprecation warning. # * Several minor fixes to functionality in [Parsing and Recombining Inputs](Parser.ipynb), [Concolic Fuzzing](ConcolicFuzzer.ipynb), [Symbolic Fuzzing](SymbolicFuzzer.ipynb) # * Better style when printing from browser (colored text, smaller fonts) # * Avoid tracking in YouTube videos # * Several typos and other minor fixes throughout the book # + [markdown] slideshow={"slide_type": "slide"} # ## Version 0.8.0 (released 2019-05-21) # # First numbered fuzzingbook release. # # * Includes [Python pip package](Importing.ipynb). # * Includes _Synopsis_ sections at the beginning of each chapter, highlighting their usage in own code. # * Describes [Tours through the Book](Tours.ipynb). # + [markdown] slideshow={"slide_type": "slide"} # ## Chapter Releases # # Before switching to numbered releases, new chapters were coming out every Tuesday. # # 1. [Introduction to Software Testing](Intro_Testing.ipynb) – 2018-10-30 # 1. [Fuzzing: Breaking Things with Random Inputs](Fuzzer.ipynb) – 2018-10-30 # 1. [Getting Coverage](Coverage.ipynb) – 2018-11-06 # 1. [Mutation-Based Fuzzing](MutationFuzzer.ipynb) – 2018-11-06 # 1. [Fuzzing with Grammars](Grammars.ipynb) – 2018-11-13 # 1. [Efficient Grammar Fuzzing](GrammarFuzzer.ipynb) – 2018-11-20 # 1. [Grammar Coverage](GrammarCoverageFuzzer.ipynb) – 2018-11-27 # 1. [Testing Configurations](ConfigurationFuzzer.ipynb) – 2018-12-04 # 1. [Parsing and Recombining Inputs](Parser.ipynb) – 2018-12-11 # 1. [Probabilistic Grammar Fuzzing](ProbabilisticGrammarFuzzer.ipynb) – 2018-12-18 # 1. [Fuzzing with Generators](GeneratorGrammarFuzzer.ipynb) – 2019-01-08 # 1. [Fuzzing APIs](APIFuzzer.ipynb) – 2019-01-15 # 1. [Carving Unit Tests](Carver.ipynb) – 2019-01-22 # 1. [Reducing Failure-Inducing Inputs](Reducer.ipynb) – 2019-01-29 # 1. [Web Testing](WebFuzzer.ipynb) – 2019-02-05 # 1. [GUI Testing](GUIFuzzer.ipynb) – 2019-02-12 # 1. 
[Mining Input Grammars](GrammarMiner.ipynb) – 2019-02-19 # 1. [Tracking Information Flow](InformationFlow.ipynb) – 2019-03-05 # 1. [Concolic Fuzzing](ConcolicFuzzer.ipynb) – 2019-03-12 # 1. [Symbolic Fuzzing](SymbolicFuzzer.ipynb) – 2019-03-19 # 1. [Mining Function Specifications](DynamicInvariants) – 2019-03-26 # 1. [Search-Based Fuzzing](SearchBasedFuzzer.ipynb) – 2019-04-02 # 1. [Evaluating Test Effectiveness with Mutation Analysis](MutationAnalysis.ipynb) – 2019-04-09 # 1. [Greybox Fuzzing](GreyboxFuzzer.ipynb) – 2019-04-16 # 1. [Greybox Fuzzing with Grammars](GreyboxGrammarFuzzer.ipynb) – 2019-04-30 # 1. [Fuzzing in the Large](FuzzingInTheLarge.ipynb) – 2019-05-07 # 1. [When to Stop Fuzzing](WhenToStopFuzzing.ipynb) – 2019-05-14 # 1. [Tours through the Book](Tours.ipynb) - 2019-05-21 # # After all chapters were out, we switched to a release-based schedule, with numbered minor and major releases coming out when they are ready.
docs/beta/notebooks/ReleaseNotes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def largest_line_sum(grid):
    """Find the row or column of `grid` with the largest sum.

    Args:
        grid: rectangular list of lists of ints (n rows, m columns).

    Returns:
        (kind, index, total) where kind is "row" or "column", index is the
        0-based row/column index, and total is that line's sum.

    Tie-breaking matches the original notebook exactly: rows are scanned
    before columns and comparisons are strict (`best < total`), so on a tie
    the earliest row wins over later rows and over any column.  The best
    value starts at -1 (as in the original), so a grid whose every line sum
    is <= -1 — or an empty grid — yields ("row", -1, -1).
    """
    kind, index, best = "row", -1, -1

    # Row sums first.
    for r, row in enumerate(grid):
        total = sum(row)
        if best < total:
            best, index = total, r

    # Column sums; a column only wins by strictly exceeding the best row.
    n_cols = len(grid[0]) if grid else 0
    for c in range(n_cols):
        total = sum(row[c] for row in grid)
        if best < total:
            kind, index, best = "column", c, total

    return kind, index, best


def main():
    """Read `n m` on one line and n*m ints on the next; report the winner."""
    first = input().strip().split()
    n, m = int(first[0]), int(first[1])
    flat = [int(tok) for tok in input().strip().split()]
    grid = [[flat[r * m + c] for c in range(m)] for r in range(n)]

    kind, index, total = largest_line_sum(grid)
    # The original printed each field with `end=" "` — keep that format:
    # single spaces between fields, a trailing space, no newline.
    print(kind, index, total, end=" ")


if __name__ == "__main__":
    main()
Lecture 4 2D Lists and Numpy/Largest Row or Column-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:Anaconda3] # language: python # name: conda-env-Anaconda3-py # --- # # Saving and restoring in Tensorflow # ## 1. Saving Model: # + import tensorflow as tf save_path = "./" save_file = "model" # Create some variables. # NOTE:It is important to name variables and ops you would like to execute a = tf.Variable([[1,1], [1,1]], dtype=tf.float32, name="a") b = tf.Variable([[2,2], [2,2]], dtype=tf.float32, name="b") c = tf.matmul(a,b, name = "op_matmul") # initialize all variables. init_op = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init_op) output = sess.run(c) print("output: {}" .format(output)) print("a: {}" .format(a.eval())) #saver to save and restore all the variables. saver = tf.train.Saver() saver.save(sess, save_path + save_file) print("Model saved in file: %s" % save_path + save_file) # - # ## 2. Restore Variables # #### Tesnorflow has to know the name of the variables to restore them # + #RESER GRAPH tf.reset_default_graph() #Note: becase the name of the varialbe is not specified # tensorflow will not retore the store values to these variables a = tf.Variable([[0,0], [0,0]], dtype=tf.float32, name="a") b = tf.Variable([[0,0], [0,0]], dtype=tf.float32, name="b") # initialize all variables. init_op = tf.global_variables_initializer() sess = tf.Session() sess.run(init_op) # saver to save and restore all the variables. saver = tf.train.Saver() saver.restore(sess, tf.train.latest_checkpoint(save_path)) print("a: {}" .format(a.eval(sess))) print("b: {}" .format(b.eval(sess))) # - # ## 3. 
Restore and assign a specific variable new_saver = tf.train.Saver({'a': b, 'b': a}) new_saver.restore(sess, tf.train.latest_checkpoint(save_path)) print("a: {}" .format(a.eval(sess))) print("b: {}" .format(b.eval(sess))) # If you try to run an op with out restoring the graph you would get # an error like this one # # output = sess.run(c) # print("output: {}" .format(output)) # # '''ValueError: Fetch argument <tf.Tensor 'op_matmul:0' shape=(2, 2) # dtype=float32> cannot be interpreted as a Tensor. # (Tensor Tensor("op_matmul:0", shape=(2, 2), dtype=float32) # is not an element of this graph.) ''' # ## 4. Restore graph and excecute restored ops # In this example we do not need to declare varaibles; they are already initialized within the restored graph(import_meta_graph) # + import tensorflow as tf #RESER GRAPH tf.reset_default_graph() save_path = "./" save_file = "model" sess = tf.Session() saver = tf.train.import_meta_graph(save_path + save_file + '.meta') saver.restore(sess, tf.train.latest_checkpoint(save_path)) #NOTE: It's important to name ops you would like to execute output = sess.run('op_matmul:0') print("output: {}" .format(output))
Tensorflow/How to save and restore Tensorflow .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Top 5 Machine Learning Libraries in Python # In this article we are going to learn about __Numpy__. # ## Numpy # # __[NumPy](http://www.numpy.org/)__ is the fundamental package for scientific computing with Python. It mostly used for solving __matrix__ problems more specifically to solve __Linear Algebra__. When it comes to work with __arrays__ then __Numpy__ is a great choice. # ### Creating a Numpy Array import numpy as np arr = np.array([]) type(arr) # ### Creating One Dimensional Array one_d_array = np.array([1, 2, 3, 4, 5]) print(one_d_array) # ndim attributes shows the number of dimension of an array one_d_array.ndim # size attributes returns the size/length of the array one_d_array.size # ### Creating an array of zeros np.zeros(5) # by default it produce float # zeros() method takes another parameter for data type np.zeros(5, dtype=int) # ### Creating a Sequence of Number # first parameter denotes the starting point # second paramter denotes the ending point # if the third parameter was not specified then 1 is used as default print(np.arange(1, 10)) print(np.arange(1, 10, 2)) np.arange(1, 10) # ### Reshaping an Array np.arange(10).reshape(2, 5) # ### Flatten an Array two_d_arr = np.arange(10).reshape(2, 5) print(two_d_arr) two_d_arr.ravel() np.arange(10).reshape(2, 5).T np.arange(10).reshape(2, 5).T.shape # ### Universal Functions arr = np.arange(10) arr arr.max(), arr.min() # finding max, min np.exp(arr) # finding exponetial np.sqrt(arr) # finding square root # ### Accessing and Iterating Arrays element arr[5] # accessing the 6th element by index for element in arr: print(element) for element in arr: if(element % 2 == 0): print(element, "is positive") else: print(element, "is negative") # ### Linear Algebra _2d_arr = np.array([[10, 20], 
[30, 40]]) _2d_arr _2d_arr.transpose() # transpose # %%time np.linalg.inv(_2d_arr) # inverse identity = np.eye(3, dtype=int) # 3x3 Identity matrix identity _2d_arr @ _2d_arr # matrix product np.trace(_2d_arr) # trace # ### Stacking a = np.arange(0, 20, 2) a b = np.arange(10) b # vertical stacking vs = np.vstack([a, b]) vs # horizontal stacking hs = np.hstack([a, b]) hs
source codes/Numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 64-bit # name: python3 # --- # # Chapter 6 # # # Advance Indexing and Numpy IO Operation # # !pip install -q numpy def array_properties(a): print("a = \n", a) print("dim = ", a.ndim) print("shape = ", a.shape) print("datatype = ", a.dtype) print("size = ", a.size) print() # ## Array slicing # # + import numpy as np a5x3 = np.random.randint(0,1000,(5,3)) array_properties(a5x3) # + # print 32 print(a5x3[3,1]) # + # reshape to 15x-1 a15x_ = a5x3.reshape((15,-1)) array_properties(a15x_) # + # reshape to -1x15 a_x15 = a5x3.reshape((-1,15)) array_properties(a_x15) # + # change a5x3 to 1 dimension a53 = a5x3.flatten() array_properties(a53) # + # change a5x3 to 1 dimension # using ravel a53b = a5x3.ravel() array_properties(a53b) # - # ## Class activity # # 1. Create an array a, of shape (5,5,3) of data type float32. # 2. Create an array c, of 1 dimension from array a. 
# + import numpy as np a5 = np.arange(1,(5+1), dtype=np.float32) array_properties(a5) a5x5 = np.resize(a5,(5,5)) array_properties(a5x5) # + import numpy as np start =1 stop =4 a3 = np.arange(start,(stop), dtype=np.float32) array_properties(a3) a5x5x3 = np.resize(a3,(5,5,3)) array_properties(a5x5x3) # - # ### 1D # + import numpy as np stop = 9 a1 = np.arange(stop) array_properties(a1) print('Elements from first:', a1[0]) print('Elements from first:', a1[-9]) print('Elements from last:', a1[-1]) print('Elements from last:', a1[8]) # + print('Elements from first to last:', a1[0:9]) print('Elements from first to last:', a1[0:]) print('Elements from first to last:', a1[:]) # - print('Elements from index 1 to 6:', a1[1:6]) print('Elements from index 1 to 6:', a1[1:-3]) print('Elements from index 1 to 6:', a1[-8:-3]) print('Even index Elements:', a1[::2]) print('Odd index Elements:', a1[1::2]) # ### 2D # + import numpy as np a2 = np.arange(1,17).reshape(4,4) array_properties(a2) print('Elements from first:', a2[0,0]) print('Elements from last:', a2[3,3]) # - print('Elements in first row:', a2[0,0:4]) print('Elements in first row:', a2[0,:4]) print('Elements in first row:', a2[0,0:]) print('Elements in first row:', a2[0,:]) print('Elements in first row:', a2[0]) print('Elements in last row:', a2[3,0:4]) print('Elements in first row:', a2[3,:4]) print('Elements in first row:', a2[3,0:]) print('Elements in first row:', a2[3,:]) print('Elements in first row:', a2[3]) # + array_properties(a2) print('Elements in rows 2 to 3 and columns 2 to 3:\n', a2[1:3,1:3]) # + import numpy as np a1D = np.arange(10) array_properties(a1D) # + # get 2 to 7 a2i = a1D[2] a3i = a1D[3] a4i = a1D[4] a5i = a1D[5] a6i = a1D[6] a7i = a1D[7] result = np.array([a2i,a3i,a4i,a5i,a6i,a7i]) print(result) # + # get 2 to 7 using slicing a2to7 = a1D[2:8] array_properties(a2to7) # + import numpy as np a2D = np.arange(1,26) a2D = np.resize(a2D, (5,5)) array_properties(a2D) # + # what are the values on row 2 or 
row index 1 # [6, 7, 8, 9, 10] print(a2D[1,:]) # + # what are the values in rows 2 to 4 rows_2_to_4 = a2D[1:-1,:] array_properties(rows_2_to_4) # - # ||0|1|2|3|4| # |---|---|---|---|---|---| # |0|1|2|3|4|5| # |1|6|7|8|9|10| # |2|11|12|13|14|15| # |3|16|17|18|19|20| # |4|21|22|23|24|25| # + # get these values # [7, 8, 9] # [12, 13, 14] # [17, 18, 19] result = a2D[1:4,1:4] print(result) # - # indexing format is [start : stop : step] # default indexing format is [start=0 : stop=last : step=1] result = a2D[::2,::2] print(result) # indexing format is [start : stop : step] # default indexing format is [start=0 : stop=last : step=1] result = a2D[::3,::2] print(result) # indexing format is [start : stop : step] # default indexing format is [start=0 : stop=last : step=1] result = a2D[1::3,::2] print(result) # ## Boolen and integer indexing # # # We have been using integer for indexing all along in a particular format as [start, stop, step]. # However we can make use of array of integer and boolean (True or False) values as direct indexing value. # + # normal indexing for obtaing the odd and even index of an array import numpy as np a1 = np.arange(9) array_properties(a1) print('Odd index Elements:', a1[1::2]) print('Even index Elements:', a1[::2]) # - # |0|1|2|3|4|5|6|7|8| # |---|---|---|---|---|---|---|---|--| # |0|1|2|3|4|5|6|7|8| # In other to use integer or boolean index the array of integer and array of boolean must have similar dimension to the original array to be accessed. # Using array of integer indexing # + i_odd = np.array([1,3,5,7]) bool_odd = np.array([False, True,False, True,False, True,False, True,False]) print('Odd index Elements:', a1[1::2]) print('Odd index Elements:', a1[i_odd]) print('Odd index Elements:', a1[bool_odd]) # - # ## Class Activity # # 1. Create an array of 1D from 1 to 9 # 2. Print the odd values using normal, integer and boolean indexing. # 3. Print the even values using normal, integer and boolean indexing. 
# +
import numpy as np

# Array of the integers 1..9 (arange's stop bound is exclusive).
a9 = np.arange(1,10)
array_properties(a9)

# +
# Integer (fancy) indexing: positions 0,2,4,6,8 hold the odd values 1,3,5,7,9.
iodd = np.array([0,2,4,6,8])
a_odd = a9[iodd]
array_properties(a_odd)

# +
# Positions 1,3,5,7 hold the even values 2,4,6,8.
ieven = np.array([1,3,5,7])
a_even = a9[ieven]
array_properties(a_even)

# +
# get odd values from the array of 1 to 9 using boolean indexing:
# create array a of 1 to 9, then a boolean array of the same shape
# as array a with the desired positions set to True
# -

# Class Activity
#
# 1. Write the index for the even numbers contained in the array (my_arr) below.
#    - 28 = [0,1], 22 = [0,5]
#    - 50 = [1,0], 66 = [1,2], 92 = [1,1], 98 = [1,3], 74 = [1,6]
#    - 44, 60, 98
#    - 96, 38
#    - 8, 66, 92, 62
# 2. Write the index for the numbers divisible by 5 contained in the array (my_arr) below.
#    - 75 = [0,0]
#    - 50 = [1,0]
#    - 85 = [2,2], 35 = [2,3], 60 = [2,4]
#    - 55 = [3,2]

# +
import numpy as np

my_arr = np.random.randint(1, 100, (5,7))
array_properties(my_arr)

# +
# FIX: the original cell listed the index pairs as comments inside
# np.array([...]), which produced an EMPTY float64 array, and indexing
# my_arr with it raised IndexError.  To pick individual elements, pass the
# row indices and the column indices as two parallel integer arrays:
# my_arr[rows, cols] selects (0,0), (1,0), (2,2), (2,3), (2,4), (3,2)
# (the positions from the activity answer above).
rows_div5 = np.array([0, 1, 2, 2, 2, 3])
cols_div5 = np.array([0, 0, 2, 3, 4, 2])
result = my_arr[rows_div5, cols_div5]
array_properties(result)

# +
# find the modulus of my_arr;
# the values divisible by 5 will be equal to zero
ibooldiv5 = my_arr % 5
array_properties(ibooldiv5)
# -

# compare each value with zero and assign back to ibooldiv5
# so the array elements become boolean
ibooldiv5 = ibooldiv5 == 0
array_properties(ibooldiv5)

# use the new boolean array for indexing
result = my_arr[ibooldiv5]
array_properties(result)

# +
import numpy as np

a2 = np.arange(1,17).reshape(4,4)
array_properties(a2)
print('Elements in rows 2 to 3 and columns 2 to 3:\n', a2[1:3,1:3])
# -

# Paired 2-D index arrays: a2[i,j] picks a2[1,1], a2[1,2], a2[2,1], a2[2,2].
i = np.array([[1,1],[2,2]])
j = np.array([[1,2],[1,2]])
print('Elements in i:\n', a2[i])
print('Elements in j:\n', a2[:,j])
print('Elements in rows 2 to 3 and columns 2 to 3:\n', a2[i,j])

# +
# A column index array broadcast against a row index array gives the same
# 2x2 selection with less typing.
i = np.array([[1],[2]])
j = np.array([[1,2]])
print('Elements in i:\n', a2[i])
print('Elements in j:\n', a2[:,j])
print('Elements in rows 2 to 3 and columns 2 to 3:\n', a2[i,j])

# +
# Build a boolean mask by comparing against a sentinel-filled copy.
a2_c = np.full_like(a2,-100)
array_properties(a2_c)
a2_c[1:3,1:3] = a2[1:3,1:3]
print(a2_c)
print()
a2_bool = a2_c == a2
print(a2_bool)
print('Elements in rows 2 to 3 and columns 2 to 3:\n', a2[a2_bool])

# +
# One boolean array per axis: b1 selects rows, b2 selects columns.
b1 = np.array([False, True, True, False])
b2 = np.array([False, True, True, False])
print('Elements in b1:\n', a2[b1])
print('Elements in b2:\n', a2[:,b2])
print('Elements in b1,b2:\n', a2[b1,b2])
# -

# ## Class Activity
#
# 1. Create an array of shape (5,6), range of int between 0 and 2000
# 2. print the even numbers
# 3. print the odd numbers
# 4. print numbers divisible by 11
# 5. Seed of random 12345

# +
# 1. Create an array of shape (5,6), range of int between 0 and 2000
import numpy as np

np.random.seed(12345)
a5x6 = np.random.randint(0, 2000, (5,6))
array_properties(a5x6)

# +
# 2. print the even numbers
ibool_even = (a5x6 % 2) == 0
array_properties(a5x6[ibool_even])

# +
# 3. print the odd numbers (complement of the even mask;
# FIX: `~mask` replaces the non-idiomatic `mask == False`)
ibool_odd = ~ibool_even
array_properties(a5x6[ibool_odd])
# -

# 4. print numbers divisible by 11

# +
# 5. Seed of random 12345
# -

# ## Working with 3D

# +
import numpy as np

a3 = np.random.randint(0,256,(10,10,3),dtype=np.uint8)
array_properties(a3)
# -

# Slice each colour channel out of the (10,10,3) image-like array.
b = a3[:,:,0]
array_properties(b)

g = a3[:,:,1]
array_properties(g)

r = a3[:,:,2]
array_properties(r)

# ### Ravel

# +
# ravel flattens the 3-D array into 1-D.
a3_rav = np.ravel(a3)
array_properties(a3_rav)
# -

# ### Reshape

# +
a3_shp = a3_rav.reshape(10,10,3)
array_properties(a3_shp)

# +
# -1 lets numpy infer the remaining dimension.
a3_shp = a3_rav.reshape(10,-1,3)
array_properties(a3_shp)

# +
a3_shp = a3_rav.reshape(-1,10,3)
array_properties(a3_shp)
# -

# ## Stacking
# Joining arrays together is stacking.
# Arrays can be joined horizontally (attached to the right or left) or vertically (attached to the bottom or top).

# +
import numpy as np
a21 = np.random.random((3,4))
array_properties(a21)

# +
a22 = np.random.random((3,1))
array_properties(a22)

# +
a23 = np.random.random((2,4))
array_properties(a23)
# -

print('a21', a21.shape)
print('a22', a22.shape)
print('a23', a23.shape)

# +
# both a21 and a22 have the same number of rows (dimension 1 is the same: 3),
# therefore they can be horizontally joined.
print('a21', a21.shape)
print('a22', a22.shape)
a2h = np.hstack((a21, a22))
array_properties(a2h)

# +
# both a21 and a23 have the same number of columns (dimension 2 is the same: 4),
# therefore they can be vertically joined.
print('a21', a21.shape)
print('a23', a23.shape)
a2v = np.vstack((a21, a23))
array_properties(a2v)
# -

# ### `np.concatenate` alternative to hstack
# operates along axis=1

# +
# alternative to hstack
# both a21 and a22 have the same number of rows (dimension 1 is the same: 3),
# therefore they can be horizontally joined.
print('a21', a21.shape)
print('a22', a22.shape)
a2h_a = np.concatenate((a21, a22), axis=1)
array_properties(a2h_a)
# -

# ### `np.concatenate` alternative to vstack
# operates along axis=0, the default

# +
# alternative to vstack
# both a21 and a23 have the same number of columns (dimension 2 is the same: 4),
# therefore they can be vertically joined.
print('a21', a21.shape)
print('a23', a23.shape)
a2v_a = np.concatenate((a21, a23))
array_properties(a2v_a)
# -

# ## Splitting

# +
# horizontal splitting
a1 = np.arange(24)
array_properties(a1)

# split into 3 equal parts.
# make sure the array can be split into 3 equal parts,
# i.e. its length is a multiple of three.
a1h = np.hsplit(a1,3)
print(a1h)

# +
# split according to size or shape:
# the tuple gives the cut positions; any elements remaining after the last
# cut position are grouped into one final sub-array.
a1e = np.hsplit(a1,(4,10))
print(a1e)
# -

# ## 2D Splitting

# +
# 2D splitting (hsplit cuts along the columns)
a2 = np.arange(24).reshape(2,-1)
array_properties(a2)

a1h = np.hsplit(a2,3)
print(a1h)
print()

a1e = np.hsplit(a2,(4,10))
print(a1e)

# +
# vertical splitting (vsplit cuts along the rows)
a2 = np.arange(24).reshape(4,-1)
array_properties(a2)

a1h = np.vsplit(a2,2)
print(a1h)
print()

a1e = np.vsplit(a2,(1,3))
print(a1e)
# -

a1e[0]

a1e[1]

# ## Saving and reading numpy data
#

# +
import numpy as np
# Three random 10x10 RGB "images" as a toy dataset.
img1 = np.random.randint(0,256,(10,10,3),dtype='uint8')
img2 = np.random.randint(0,256,(10,10,3),dtype='uint8')
img3 = np.random.randint(0,256,(10,10,3),dtype='uint8')

dataset = [img1, img2, img3]
dataLabel = ['img1', 'img2', 'img3']
# -

# savez_compressed stores several arrays in one compressed .npz archive,
# retrievable later by the keyword names used here.
np.savez_compressed('dataset.npz', images=dataset, labels=dataLabel)

# +
import matplotlib.pyplot as plt

plt.figure(figsize=(10,8))
plt.subplot(1,3,1)
plt.imshow(img1)
plt.subplot(1,3,2)
plt.imshow(img2)
plt.subplot(1,3,3)
plt.imshow(img3)
plt.show()

# +
# Load the archive back and unpack by keyword name.
data = np.load('dataset.npz')
imgs = data['images']
labels = data['labels']
print(len(imgs))
print(len(labels))

# +
import matplotlib.pyplot as plt

plt.figure(figsize=(10,8))
plt.subplot(1,3,1)
plt.imshow(imgs[0])
plt.subplot(1,3,2)
plt.imshow(imgs[1])
plt.subplot(1,3,3)
plt.imshow(imgs[2])
plt.show()
# -

#
essential/06.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# General image classification: CIFAR-10 with a small CNN
import numpy as np
from keras.datasets import cifar10
import otherLib as olib

np.random.seed(10)  # fix the random result for reproducibility

# Helper library: configure GPU memory allocation
# (FIX: the duplicate `import otherLib as olib` line was removed.)
olib.solve_cudnn_error()

# Download the dataset
(x_train_image, y_train_label), (x_test_image, y_test_label) = cifar10.load_data()

# Image set sizes
print('x_train_image', x_train_image.shape)  # > x_train_image (count, width, height, rgb)
print('y_train_label', y_train_label.shape)  # > y_train_label (count,)

# Meaning of each label index — the 10 CIFAR-10 classes.
# FIX: class 9 is "truck"; the original said "trunk".
label_dict = {
    0: "airplane",
    1: "automobile",
    2: "bird",
    3: "cat",
    4: "deer",
    5: "dog",
    6: "frog",
    7: "horse",
    8: "ship",
    9: "truck"
}

# Look at the first training image
olib.plot_prediction(x_train_image, y_train_label, [], 0, dict = label_dict)

# Normalize pixel values to [0, 1]
x_train_image_normalize = x_train_image.astype('float32') / 255.0
x_test_image_normalize = x_test_image.astype('float32') / 255.0

# Compare one pixel before and after normalization
print(x_train_image[0][0][0])
print(x_train_image_normalize[0][0][0])

# One-hot encode the labels
from keras.utils import np_utils
y_train_oneHot = np_utils.to_categorical(y_train_label)
y_test_oneHot = np_utils.to_categorical(y_test_label)

# Build the CNN model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D

model = Sequential()

# Convolution layer 1
# filters: number of filters (32)
# kernel_size: filter size (3*3)
# padding: same (pad the border so the output keeps the input size)
# input_shape: input layer size — 32*32 RGB, i.e. (32, 32, 3)
#              (FIX: the original comment said 32*32*1)
# activation: ReLU
model.add(Conv2D(
    filters = 32,
    kernel_size = (3,3),
    padding = 'same',
    input_shape = (32, 32, 3),
    activation = 'relu'
))

# Dropout: randomly discard 30% of the convolution outputs
model.add(Dropout(0.3))

# Pooling layer
# pool_size: pooling window (3*3)
model.add(MaxPooling2D(
    pool_size = (3,3)
))

# Convolution layer 2
model.add(Conv2D(
    filters = 64,
    kernel_size = (3,3),
    padding = 'same',
    activation = 'relu'
))

# Dropout 2
model.add(Dropout(0.3))

# Pooling layer 2
model.add(MaxPooling2D(
    pool_size = (3,3)
))

# Convolution layer 3
model.add(Conv2D(
    filters = 128,
    kernel_size = (3,3),
    padding = 'same',
    activation = 'relu'
))

# Dropout 3
model.add(Dropout(0.3))

# Pooling layer 3
model.add(MaxPooling2D(
    pool_size = (3,3)
))

# Flatten layer + Dropout 4 before the dense layers
model.add(Flatten())
model.add(Dropout(0.3))

# Hidden layer
# units: 1024 neurons
# activation: ReLU
model.add(Dense(
    units = 1024,
    activation = 'relu'
))
model.add(Dropout(0.25))

# Output layer
# units: 10 classes
# activation: softmax probability vector
model.add(Dense(
    units = 10,
    activation = 'softmax'
))

# Show the model we just built
print(model.summary())

# Training configuration
# loss: categorical cross-entropy
# optimizer: adam
# metrics: accuracy
model.compile(
    loss = 'categorical_crossentropy',
    optimizer = 'adam',
    metrics = ['accuracy']
)

# Train
# x, y: inputs and one-hot targets
# validation_split: 0.2 of the data for validation, 0.8 for training
# epochs: 50
# batch_size: 128 samples per batch (FIX: the original comment claimed 300)
train_history = model.fit(
    x = x_train_image_normalize,
    y = y_train_oneHot,
    validation_split = 0.2,
    epochs = 50,
    batch_size = 128
)

# loss / accuracy: metrics on the training portion (0.8)
# val_loss / val_accuracy: metrics on the validation portion (0.2)
olib.show_train_history(train_history)

# Accuracy on the held-out test set
scores = model.evaluate(x_test_image_normalize, y_test_oneHot)
print('accuracy=', scores[1])

# Predicted class per test image
prediction = np.argmax(model.predict(x_test_image_normalize), axis=-1)
prediction

# Confusion matrix
import pandas as pd
pd.crosstab(
    y_test_label.reshape(-1),
    prediction,
    rownames = ['label'],
    colnames = ['prediction']
)

# Inspect the confused samples (actual class 2, predicted class 3)
df = pd.DataFrame({
    'label': y_test_label.reshape(-1),
    'prediction': prediction
})
df[(df.label == 2) & (df.prediction == 3)]

# See how ambiguous one of the confused images looks
olib.plot_prediction(x_test_image, y_test_label, prediction, idx=195, dict = label_dict)

# Show the predicted class probabilities for one sample
predicted_probability = model.predict(x_test_image_normalize)
olib.show_Predicted_Probability(y_test_label, prediction, x_test_image, predicted_probability, 0, label_dict)

# Save this run's model
model.save("saveModel/Cifar")
print("Save model to disk")
Cifar.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.1.0
#     language: julia
#     name: julia-1.1
# ---

# +
# Compare several time-stepping schemes on a Hamiltonian system
# using the project-local HamiltonODE.jl helpers.
include("../../HamiltonODE.jl")
using Plots
pyplot();

# +
# 2-D harmonic oscillator: F(q) = -q, U(q) = |q|^2 / 2,
# exact solution the unit circle (cos t, sin t).
# NOTE(review): the file is named "Dahlquist" but the system set up here is
# the harmonic oscillator — confirm the intended test problem.
d = 2                    # configuration-space dimension
m = (1)                  # mass
q0 = [1.,0.]             # initial position
p0 = [0.,1.]             # initial momentum
F(q)= -q                 # force
U(q)= q⋅q/2              # potential energy
sol(q)=[cos(q),sin(q)]   # exact solution as a function of time
HS = HamiltonSystem(d,m,q0,p0,F,U,sol);

# +
# Integrate two full periods (T = 4*pi) with step 0.1 using three steppers
# (verlet_step / euler_step / i_euler_step as defined in HamiltonODE.jl).
T=2*2*pi
dt = 1e-1
Is = [
    Integrator(verlet_step,dt,T),
    Integrator(euler_step,dt,T),
    Integrator(i_euler_step,dt,T)
    ];
# -

ts,Qs,Ps = integrate(HS,Is);

# Per-integrator diagnostics: energy drift and the maximum deviation
# from the exact trajectory over the run.
names = getNames(Is)
EnergyErrors = getEnergyErrors(HS,Qs,Ps)
MaximumErrors = getMaximumErrors(HS,ts,Qs);

plotOrbits(Qs,names,aspect_ratio=:equal)

plot(ts,MaximumErrors,label = hcat(names...),title="Maximum Error")

plotT(ts,EnergyErrors',names)
Examples/Jupyter/Dahlquist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Polynomial Regression

# What if your data doesn't look linear at all? Let's look at some more realistic-looking page speed / purchase data:

# +
# %matplotlib inline
from pylab import *

np.random.seed(2)
# Purchase amount falls off roughly as 1/pageSpeed, plus Gaussian noise.
pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = np.random.normal(50.0, 10.0, 1000) / pageSpeeds

scatter(pageSpeeds, purchaseAmount)
# -

# numpy has a handy polyfit function we can use, to let us construct an nth-degree polynomial model of our data that minimizes squared error. Let's try it with a 4th degree polynomial:

# +
x = np.array(pageSpeeds)
y = np.array(purchaseAmount)

# Least-squares fit of a degree-4 polynomial; poly1d wraps the
# coefficients in a callable polynomial object.
p4 = np.poly1d(np.polyfit(x, y, 4))
# -

# We'll visualize our original scatter plot, together with a plot of our predicted values using the polynomial for page speed times ranging from 0-7 seconds:

# +
import matplotlib.pyplot as plt

xp = np.linspace(0, 7, 100)
plt.scatter(x, y)
plt.plot(xp, p4(xp), c='r')
plt.show()
# -

# Looks pretty good! Let's measure the r-squared error:

# +
from sklearn.metrics import r2_score

r2 = r2_score(y, p4(x))

# FIX: use the function-call form of print — it is valid in both Python 2
# (parentheses around a single expression) and Python 3, unlike `print r2`.
print(r2)
# -

# ## Activity
# Try different polynomial orders. Can you get a better fit with higher orders? Do you start to see overfitting, even though the r-squared score looks good for this particular data set?
1t_DataAnalysisMLPython/1j_ML/DS_ML_Py_SBO/DataScience/PolynomialRegression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''base'': conda)'
#     name: python3
# ---

# # Correlation analysis between regions
# - Pearson Correlation

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math

# +
# Daily cumulative confirmed counts; the CSV interleaves the 18 regions
# day by day, so row i belongs to region i % 18.
df = pd.read_csv('D:\git_space\COVID-19-prediction-project\data\kr_regional_daily.csv')
df = df.drop(['date', 'region', 'death', 'released'], axis=1)
df_list = df['confirmed'].tolist()

# +
# Region order as the rows repeat in the CSV.
REGIONS = ['Seoul', 'Pusan', 'Daegu', 'Incheon', 'Gwangju', 'Daejeon',
           'Ulsan', 'Sejong', 'Gyeonggi', 'Gangwon', 'Chungbuk', 'Chungnam',
           'Jeonbuk', 'Jeonnam', 'Gyeongbuk', 'Gyeongnam', 'Jeju', 'Overseas']

# FIX: the original unpacked the interleaved rows with an 18-branch
# `i % 18` if/elif chain plus 18 copies of the same DataFrame construction.
# A strided slice per region produces identical one-column frames.
regions = {name: pd.DataFrame(df_list[i::18], columns=['confirmed'])
           for i, name in enumerate(REGIONS)}
# -


# +
def make_diff(dataframe):
    """Return a first-difference copy of a one-column 'confirmed' DataFrame.

    The first row has no predecessor; the original analysis seeds it with 1
    — kept for backward compatibility.  NOTE(review): confirm 1 (rather than
    0 or NaN) is the intended seed value.
    """
    values = dataframe['confirmed'].tolist()
    diff = [values[i] - values[i - 1] for i in range(1, len(values))]
    return pd.DataFrame([1] + diff, columns=['confirmed'])


def diff_level(dataframe, level):
    """Apply make_diff `level` times (order-`level` differencing)."""
    model = dataframe
    for _ in range(level):
        model = make_diff(model)
    return model
# -

# First-order differencing: cumulative totals -> daily new confirmed cases.
regions = {name: diff_level(frame, 1) for name, frame in regions.items()}

# ## minmaxscaling

# +
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns

# One column per region, aligned by day.
corr_df = pd.DataFrame({name: regions[name]['confirmed'].values
                        for name in REGIONS})
# -

# Scale every column to [0, 1].
features = corr_df
scaler = MinMaxScaler()
scaler.fit(features)
df_scaled = scaler.transform(features)
corr_df_scaled = pd.DataFrame(data=df_scaled, columns=features.columns)

# ## Pearson correlation

# +
colormap = plt.cm.RdBu
plt.figure(figsize = (30, 15))
plt.title('pearson correlation', y = 1.05, size = 24)

# Mask the upper triangle so each region pair is drawn only once.
mask = np.zeros_like(corr_df_scaled.corr())
mask[np.triu_indices_from(mask)] = True

sns.heatmap(
    corr_df_scaled.corr(),
    mask = mask,
    linewidths = 0.1,
    vmax = 1.0,
    square = True,
    cmap = colormap,
    linecolor = 'white',
    annot = True,
)
plt.show()


# +
def corr_rank(name, data, day):
    """Rank all regions by |Pearson correlation| against region `name`'s
    daily series shifted forward by `day` days (lagged cross-correlation).

    `data` must be the one-column 'confirmed' DataFrame for region `name`.
    Returns a descending pandas Series indexed by region name.
    """
    # Shift forward and back-fill the emptied leading rows with the first
    # real value (for day == 0 the label slice is empty, so this is a no-op).
    data = data.shift(day)[:len(data)]
    data.loc[0:day-1, 'confirmed'] = data.loc[day, 'confirmed']

    # Substitute the lagged series for the target region's column.
    series = {n: regions[n]['confirmed'].values for n in REGIONS}
    series[name] = data['confirmed'].values
    corr_df = pd.DataFrame(series)

    # scale the data from 0 to 1
    scaler = MinMaxScaler()
    corr_df_scaled = pd.DataFrame(data=scaler.fit_transform(corr_df),
                                  columns=corr_df.columns)

    # pearson correlation
    corr = corr_df_scaled.corr(method='pearson')
    # FIX: the original computed abs(corr[i]) with the leftover loop
    # variable `i`, which always ranked against the LAST region in the
    # list instead of `name`.
    abs_data = abs(corr[name])

    # sort by absolute pearson correlation score, descending
    sorted_data = abs_data.sort_values(axis=0, ascending=False)
    return sorted_data


def time_difference(name, data, day):
    """Plot corr_rank for every lag 0..day side by side as grouped bars."""
    df = pd.concat([corr_rank(name, data, d) for d in range(day + 1)], axis=1)
    df.plot.bar(legend=False, figsize=(15,7))
# -

corr_rank('Seoul', regions['Seoul'], 0)

time_difference('Seoul', regions['Seoul'], 0)
# Indicators that rise with the lag are the ones influenced by the target data.

# ## time difference 30 days

time_difference('Seoul', regions['Seoul'], 30)
# Indicators that rise with the lag are the ones influenced by the target data.

# ## from overseas

time_difference('Daegu', regions['Daegu'], 30)
# Indicators that rise with the lag are the ones influenced by the target data.
correlation_analysis/pearson_correlation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Load libraries
import pandas as pd
# FIX: pandas.tools.plotting was removed (pandas >= 0.20); scatter_matrix
# now lives in pandas.plotting.
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
# FIX: sklearn.cross_validation was removed (scikit-learn >= 0.20); the
# same helpers live in sklearn.model_selection.
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

# Load dataset (UCI iris: 4 numeric features + class label)
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pd.read_csv(url, names=names)

print(dataset.shape)
print(dataset.head(20))

# box and whisker plot for each attribute
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
plt.show()

dataset.hist()
plt.show()

scatter_matrix(dataset)
plt.show()

# Split-out validation dataset (80% train, 20% held-out validation)
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)

# Test options and evaluation metric
seed = 7
scoring = 'accuracy'

# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))

# evaluate each model in turn with 10-fold cross-validation on the train split
results = []
names = []
for name, model in models:
    cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=10, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

# Compare Algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')  # FIX: typo "Comparsion"
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()

# Fit KNN on the training split and score it on the held-out validation set
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)

# +
print("Accuracy: %.2f" % accuracy_score(Y_validation, predictions))

indexes = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']

# Label the confusion matrix rows/columns with the class names.
confusion_matrix_df = pd.DataFrame(
    data = confusion_matrix(Y_validation, predictions),
    index = indexes,
    columns = indexes
)

print()
print(confusion_matrix_df)
print()
print(classification_report(Y_validation, predictions))
# -
notebooks/pawel_ueb01/01_Introduction_bielski.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # English version please read"Initialize the database and import the data.ipynb" # 本文件用于简单演示如何使用shiningspectrum来创建数据库并导入数据。 #从shiningspectrum中导入database from shiningspectrum import database import os # 如果是首次运行,initialization()函数将会在你的巡行路径下创建一个database_folder文件夹,用于存放所有数据。 # # 如果database_folder文件夹存在,会在database_folder文件夹下根据database_name的值创建一个文件夹专门用于存放指定数据。且会在该文件夹下创建一个database_index_file.p文件作为索引。 # # 您如果想存储不同种类的光谱,请使用initialization()函数来创建对应的数据存储路径和索引。 database.initialization(database_name='raman_database') # import_data()函数会从您指定的data_path路径读入数据,并存储进数据库,请务必保证数据格式正确。example文件夹下的Prepare incoming data我们放了几份展示数据,您可以参考这些数据的格式,主要由一个表头和结尾构成,此外,数据以制表符分割,以换行符分行。 # + data_path=os.getcwd()+"\\Prepare incoming data" database_dictionary = database.import_data(data_path, database_name='raman_database') # - #查看已有数据库 database.existing_database() #查看指定数据库信息 #query_criteria参数可以定义查看方式,'Indexes'是查看数据库收录了什么物质,'survey'是查看收录物质及数据概况 database.view_database('raman_database', query_criteria='Indexes')
examples/初始化数据库并导入数据.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Make A Dataset # # This is a dataset used for experiment no.1 # + # # importing pandas as pd import pandas as pd # list of hall current and hall voltage hall_current = [0.15,0.20,1.04,2.38,3.77,4.59,5.29,6.28,7.43,8.17] hall_voltage = [0,0,0.003,0.006,0.010,0.013,0.015,0.014,0.016,0.016] # dictionary of lists dict = {'Current (in Ampere)': hall_current, 'Hall Voltage (in Volt)': hall_voltage} df = pd.DataFrame(dict) # saving the dataframe df.to_csv('file.csv') # -
make dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="rPh-uurrzprt" # _Lambda School Data Science, Unit 2_ # # # Regression 2 Sprint Challenge: Predict drugstore sales 🏥 # # For your Sprint Challenge, you'll use real-world sales data from a German drugstore chain, from Jan 2, 2013 — July 31, 2015. # # You are given three dataframes: # # - `train`: historical sales data for 100 stores # - `test`: historical sales data for 100 different stores # - `store`: supplemental information about the stores # # # The train and test set do _not_ have different date ranges. But they _do_ have different store ids. Your task is _not_ to forecast future sales from past sales. **Your task is to predict sales at unknown stores, from sales at known stores.** # + colab={} colab_type="code" id="jKOFk_6nzpru" import pandas as pd train = pd.read_csv('https://drive.google.com/uc?export=download&id=1E9rgiGf1f_WL2S4-V6gD7ZhB8r8Yb_lE') test = pd.read_csv('https://drive.google.com/uc?export=download&id=1vkaVptn4TTYC9-YPZvbvmfDNHVR8aUml') store = pd.read_csv('https://drive.google.com/uc?export=download&id=1rZD-V1mWydeytptQfr-NL7dBqre6lZMo') assert train.shape == (78400, 7) assert test.shape == (78400, 7) assert store.shape == (200, 10) # + [markdown] colab_type="text" id="PrvIj4olzprv" # The dataframes have a variety of columns: # # - **Store** - a unique Id for each store # - **DayOfWeek** - integer, 1-6 # - **Date** - the date, from Jan 2, 2013 — July 31, 2015. 
# - **Sales** - the units of inventory sold on a given date (this is the target you are predicting) # - **Customers** - the number of customers on a given date # - **Promo** - indicates whether a store is running a promo on that day # - **SchoolHoliday** - indicates the closure of public schools # - **StoreType** - differentiates between 4 different store models: a, b, c, d # - **Assortment** - describes an assortment level: a = basic, b = extra, c = extended # - **CompetitionDistance** - distance in meters to the nearest competitor store # - **CompetitionOpenSince[Month/Year]** - gives the approximate year and month of the time the nearest competitor was opened # - **Promo2** - Promo2 is a continuing and consecutive promotion for some stores: 0 = store is not participating, 1 = store is participating # - **Promo2Since[Year/Week]** - describes the year and calendar week when the store started participating in Promo2 # - **PromoInterval** - describes the consecutive intervals Promo2 is started, naming the months the promotion is started anew. E.g. "Feb,May,Aug,Nov" means each round starts in February, May, August, November of any given year for that store # + [markdown] colab_type="text" id="Txb785Qdzprw" # This Sprint Challenge has three parts. To demonstrate mastery on each part, do all the required instructions. To earn a score of "3" for the part, also do the stretch goals. # + [markdown] colab_type="text" id="B9NV3COuzprw" # ## 1. Wrangle relational data, Log-transform the target # - Merge the `store` dataframe with the `train` and `test` dataframes. # - Arrange the X matrix and y vector for the train and test sets. # - Log-transform the target for the train and test set. # - Plot the target's distribution for the train set, before and after the transformation. # # #### Stretch goals # - Engineer 3+ more features. 
# - #Merge store into train and test then have a quick look train_store = train.merge(store, how = 'left') test_store = test.merge(store, how = 'left') train_store.sample(10) from sklearn.model_selection import train_test_split #Split the data by store id so we can predict on unknown ids trainval_stores = train_store['Store'].unique() train_stores, val_stores = train_test_split(trainval_stores, random_state = 42) train_split = train_store[train_store['Store'].isin(train_stores)] val_split = train_store[train_store['Store'].isin(val_stores)] #Check the split train_split.head() # + #Establish X matrices of features and y vector of target target = 'Sales' X_train = train_split.drop(columns = target) X_val = val_split.drop(columns = target) X_test = test_store.drop(columns = target) y_train = train_split[target] y_val = val_split[target] y_test = test_store[target] # - import numpy as np #Log transform y vectors y_train_log = np.log1p(y_train) y_val_log = np.log1p(y_val) y_test_log = np.log1p(y_test) import matplotlib.pyplot as plt import seaborn as sns #Quick and easy distribution plots to compare them sns.distplot(y_train) sns.distplot(y_train_log) # + [markdown] colab_type="text" id="xiljXNuKzprz" # ## 2. Fit and validate your model # - **Use Gradient Boosting** or any type of regression model. # - **Beat the baseline:** The estimated baseline Root Mean Squared Logarithmic Error is 0.90, if we guessed the mean sales for every prediction. Remember that RMSE with the log-transformed target is equivalent to RMSLE with the original target. Try to get your error below 0.20. # - **To validate your model, choose any one of these options:** # - Split the train dataframe into train and validation sets. Put all dates for a given store into the same set. Use xgboost `early_stopping_rounds` with the validation set. # - Or, use scikit-learn `cross_val_score`. Put all dates for a given store into the same fold. 
#   - Or, use scikit-learn `RandomizedSearchCV` for hyperparameter optimization. Put all dates for a given store into the same fold.
# - **Get the Validation Error** (multiple times if you try multiple iterations) **and Test Error** (one time, at the end).
#
# #### Stretch goal
# - Optimize 3+ hyperparameters by searching 10+ "candidates" (possible combinations of hyperparameters).
# -

#I had to reinstall my local and forgot to install these via commandline, that will be fixed
#after the first commit for this notebook
# !pip install xgboost
# !pip install category_encoders

# + colab={} colab_type="code" id="WWNccxI5zprz"
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error, mean_squared_log_error
from sklearn.pipeline import make_pipeline
import category_encoders as ce


#Create easy to use rmse and rmsle.  RMSE on the log-transformed target is
#equivalent to RMSLE on the original target, which is what this challenge scores.
def rmse(y_true, y_pred):
    """Root mean squared error."""
    return np.sqrt(mean_squared_error(y_true, y_pred))


def rmsle(y_true, y_pred):
    """Root mean squared logarithmic error."""
    return np.sqrt(mean_squared_log_error(y_true, y_pred))


#Ordinal-encode the categorical columns (StoreType, Assortment, PromoInterval)
encoder = ce.OrdinalEncoder()
# -

#Encode X train and val
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)

#Establish a model to fit to non log transformed data and one for log transformed
model = XGBRegressor(n_estimators = 1000, n_jobs = -1)
# FIX: the keyword was misspelled `n_job`, which the scikit-learn wrapper
# does not recognise — the log-target model was not getting the intended
# parallelism setting.
model_log = XGBRegressor(n_estimators=1000, n_jobs=-1)

#Established log and non log eval sets for early stopping rounds parameter
eval_set = [(X_train_encoded, y_train), (X_val_encoded, y_val)]
eval_set_log = [(X_train_encoded, y_train_log), (X_val_encoded, y_val_log)]

model.fit(X_train_encoded, y_train, early_stopping_rounds = 50, eval_set = eval_set)

model_log.fit(X_train_encoded, y_train_log, early_stopping_rounds = 50, eval_set = eval_set_log)

#Make non log predictions
y_pred = model.predict(X_val_encoded)

#Make log predictions and print the comparison — the two numbers should match,
#demonstrating the RMSE(log target) == RMSLE(raw target) equivalence.
y_pred_log = model_log.predict(X_val_encoded)
print(f'RMSE: {rmse(y_val_log,y_pred_log)}')
print(f'RMSLE: {rmsle(y_val, y_pred)}')

# + [markdown] colab_type="text" id="JGqeEpRmzpr1"
# ## 3. Plot model interpretation visualizations
# - Choose any one of these options:
#   - Permutation Importances plot
#   - Partial Dependency Plot, 1 feature isolation
#   - Partial Dependency Plot, 2 feature interaction
#
# #### Stretch goals
# - Plot 2+ visualizations.
# - Use permutation importances for feature selection.

# + colab={} colab_type="code" id="ZeOJFJJZzpr1"
# !pip install pdpbox
# -

# !pip install eli5

# +
import eli5
from eli5.sklearn import PermutationImportance

# cv='prefit': score the already-fitted model by shuffling one column at a time.
permuter = PermutationImportance(model,
                                 scoring='neg_mean_squared_error',
                                 cv='prefit',
                                 n_iter=5,
                                 random_state=42)
# -

#fill na for permuter (permutation importance cannot handle missing values)
X_val_no_na = X_val_encoded.fillna(0)

#Fit permuter and get importances for pdp plot.
#NOTE(review): `model` predicts on the raw-sales scale but is scored here
#against y_val_log — confirm whether model_log / y_val was intended.
permuter.fit(X_val_no_na, y_val_log)
feature_names = X_val_encoded.columns.tolist()
eli5.show_weights(permuter, top=None, feature_names=feature_names)

# +
from pdpbox.pdp import pdp_isolate, pdp_plot

#Create a pdp plot isolating the Customers feature
feature = 'Customers'
isolated = pdp_isolate(model = model,
                       dataset = X_val_encoded,
                       model_features = X_val_encoded.columns,
                       feature = feature)
# -

pdp_plot(isolated, feature_name = feature)
DS_Sprint_Challenge_8_Regression_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2

# +
import os
from pathlib import Path

import skimage.external.tifffile as tiff

from resources.conv_learner import * # important because our ResNet name needs to override theirs
from resources.plots import *
from common import Statistics, dataset_source
from models import ResNet
# -

DATA = '../datasets/yeast_v4.1'   # dataset root with train/val/test subfolders
data_path = Path(DATA)
SIZE = 200                        # image size fed to the data pipeline

# Per-class statistics: train+val pooled together vs. the held-out test split.
classes = Statistics.source_class(data_path)
train_val = zip(classes['train'],classes['val'])
test = zip(classes['test'])

train_val_stats = Statistics.per_class(train_val)
train_val_stats

test_stats = Statistics.per_class(test)
test_stats

# Re-key the stats from strain names to integer class ids.
lbls = {'Cit1_MC_WT': 0, 'Cit1_MC_mfb1KO': 1, 'Cit1_MC_mfb1KO_mmr1KO': 2, 'Cit1_MC_mmr1KO': 3}
train_val_stats = {lbls[key]:value for key, value in train_val_stats.items()}
test_stats = {lbls[key]:value for key, value in test_stats.items()}

train_val_stats

test_stats

# Test-set transforms (de)normalize with the *test* statistics;
# train/val transforms below use the train+val statistics instead.
test_norm = Normalize(test_stats)
test_denorm = Denormalize(test_stats)
val_crop = CropType.RANDOM
test_tfms = image_gen(test_norm, test_denorm, SIZE,crop_type=val_crop)


def get_data(path: str, sz, bs):
    """Build the ImageClassifierData for `path`.

    Train/val transforms are derived from train_val_stats; the module-level
    test_tfms (test-statistics based) are appended as the third element.
    sz is the image size, bs the batch size.
    """
    create, lbl2index = ImageClassifierData.prepare_from_path(path, val_name='val',test_name='test',test_with_labels=True, bs=bs)
    tfms = tfms_from_stats(train_val_stats, sz, aug_tfms=[RandomDihedral()], pad=sz//8) #even without transformations and padding -> failure
    print('\n class to index mapping:\n',lbl2index)
    tfms += (test_tfms,)
    return create(tfms)


data = get_data(DATA,SIZE, 64)

data.val_dl

data.trn_dl

# Pull one test batch and inspect a single sample; x[1] is the slice along
# the first tensor axis (assumes channel-first layout — TODO confirm).
xs ,ys = next(iter(data.test_dl))
idx = 63
x = xs[idx].cpu().numpy().copy()
y = ys[idx]

tiff.imshow(x[1])

np.mean(x[1])

# Same spot-check on a training batch.
trn_xs ,trn_ys = next(iter(data.trn_dl))
idx = 21
trn_x = trn_xs[idx].cpu().numpy().copy()
trn_y = trn_ys[idx]

tiff.imshow(trn_x[1])

trn_y
YNet_dev/Generalization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# 2018-12-15: fetch the news RSS feed of the Korean Ministry of the Interior
# and Safety (행정안전부) and print the first bytes of the response.

import urllib.request
import urllib.parse
import sys
import io

# +
# RSS endpoint; ctxCd selects the feed category.
API = "http://www.mois.go.kr/gpms/view/jsp/rss/rss.jsp"
query = {'ctxCd': '1001'}

# Show the query mapping before and after URL-encoding.
encoded = urllib.parse.urlencode(query)
print('Before', query)
encoded = urllib.parse.urlencode(query)
print('After : ', encoded)

# +
url = "?".join((API, encoded))
print("요청 url=", url)

# Read only the first 100 bytes of the body.
# NOTE(review): truncating at a fixed byte count can split a multi-byte UTF-8
# character and make the decode below raise — confirm this is acceptable for
# this demo script.
raw = urllib.request.urlopen(url).read(100)
decoded = raw.decode("utf-8")
print('출력\n', decoded)
section2/2-3-3-download.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="eOhPqC6fysD4"
# # **ArtLine**
# **Create** **Amazing** **Line** **Art**.

# + colab={"base_uri": "https://localhost:8080/"} id="xzHW4dq4ys7_" outputId="0cb3a9b8-2547-4f26-bd31-5e5cdfb0df79"
# !git clone https://github.com/aniketspeaks/ArtLine.git ArtLine

# + id="Ic1K3IP82_Hw" outputId="e71d314e-bcc3-4422-b113-81320c5c73f7" colab={"base_uri": "https://localhost:8080/"}
# cd ArtLine

# + id="tUjENOE105Ph" outputId="b03fb964-3c35-443e-a183-c29ab4b9cb33" colab={"base_uri": "https://localhost:8080/"}
# !pip install -r colab_requirements.txt

# + [markdown] id="2cjGDScH86iU"
# # **Runtime**
#
# *   Hardware Accelerator = GPU
#

# + id="qnC6OObV3sNk"
import fastai
from fastai.vision import *
from fastai.utils.mem import *
from fastai.vision import open_image, load_learner, image, torch
import numpy as np
import urllib.request
import PIL.Image
from io import BytesIO
import torchvision.transforms as T
from PIL import Image
import requests
from io import BytesIO

# NOTE(review): the import block below duplicates the one above verbatim —
# harmless, but could be removed.
import fastai
from fastai.vision import *
from fastai.utils.mem import *
from fastai.vision import open_image, load_learner, image, torch
import numpy as np
import urllib.request
import PIL.Image
from io import BytesIO
import torchvision.transforms as T


class FeatureLoss(nn.Module):
    """Perceptual (feature + Gram/style) loss used when ArtLine was trained.

    The class definition must exist at unpickling time for `load_learner`
    below. `base_loss` and `gram_matrix` are resolved at call time from the
    pickled learner's environment — they are not defined in this notebook.

    m_feat: pretrained feature extractor; layer_ids: indices of layers to
    hook; layer_wgts: per-layer loss weights.
    """

    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        # detach=False keeps gradients flowing through the hooked activations.
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))
              ] + [f'gram_{i}' for i in range(len(layer_ids))]

    def make_features(self, x, clone=False):
        # Run x through the extractor; the hooks capture the chosen layers.
        self.m_feat(x)
        return [(o.clone() if clone else o) for o in self.hooks.stored]

    def forward(self, input, target):
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        # Pixel loss + per-layer feature losses + heavily weighted Gram (style) losses.
        self.feat_losses = [base_loss(input, target)]
        self.feat_losses += [base_loss(f_in, f_out) * w
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out)) * w**2 * 5e3
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.metrics = dict(zip(self.metric_names, self.feat_losses))
        return sum(self.feat_losses)

    def __del__(self):
        self.hooks.remove()


# + id="qmLIGUuu3vp5"
# Download the pretrained learner and load it (FeatureLoss above is required
# for unpickling).
MODEL_URL = "https://www.dropbox.com/s/starqc9qd2e1lg1/ArtLine_650.pkl?dl=1"
urllib.request.urlretrieve(MODEL_URL, "ArtLine_650.pkl")
path = Path(".")
learn = load_learner(path, 'ArtLine_650.pkl')

# + [markdown] id="Fxq60IQv_UEV"
# # **URL**
# Type in a url to a direct link of an **high quality image**. Usually that means they'll end in .png, .jpg, etc.
#
# **Note**: Works well with **portrait photos having good lighting and plain background**. But you're free to explore.

# + [markdown] id="jcRgSwcBCAig"
# Link to high-quality portrait pics. Click on the image, let it expand and then copy image address.
#
# https://www.freepik.com/search?dates=any&format=search&from_query=Portrait&page=1&query=Portrait&sort=popular&type=photo

# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="DjlZYFlZcK6Q" cellView="form" outputId="dfcc9a8c-20fc-4ee1-fa67-3c0197e12dc8"
url = 'https://wallpaperaccess.com/full/503035.jpg' #@param {type:"string"}
response = requests.get(url)
img = PIL.Image.open(BytesIO(response.content)).convert("RGB")
img_t = T.ToTensor()(img)
# Wrap the tensor in a fastai Image so learn.predict accepts it.
img_fast = Image(img_t)
show_image(img_fast, figsize=(8, 8), interpolation='nearest');

# + [markdown] id="UE4iRsswAaTY"
# # **Output**

# + id="Z17mERI63sRx" colab={"base_uri": "https://localhost:8080/", "height": 483} outputId="f96273a3-6fa3-458a-e8d0-934a14018d0b"
p, img_hr, b = learn.predict(img_fast)
Image(img_hr).show(figsize=(8, 8))

# + [markdown] id="DqGbPjLDC7hM"
# # **Recommended image sources**

# + [markdown] id="2Rx4imphDA3T"
# https://www.freepik.com/search?dates=any&format=search&from_query=Portrait&page=1&query=Portrait&sort=popular&type=photo
#
# https://www.pexels.com/search/portrait%20man/
#
# https://www.flickr.com/search/?user_id=37277626%40N07&sort=date-taken-desc&safe_search=1&view_all=1&tags=portrait
#
#
#
ArtLine.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp blocks.holidays

# +
# export
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F

# Give tensors a numpy-style .ndim property so downstream code can treat
# them uniformly.
torch.Tensor.ndim = property(lambda x: x.dim())
# -

# # Holidays
# > Holidays functions


# +
# export
class Holiday(nn.Module):
    """Indicator feature for a holiday recurring every `repeat_every` steps.

    Both the holiday timestamp and the period are rescaled with the same
    (mean, scale) normalisation applied to the time axis; `w` is a learnable
    amplitude for the feature.
    """

    def __init__(self, holiday, repeat_every=365, mean=0, scale=1):
        super().__init__()
        self.holiday = (holiday - mean) / scale
        self.repeat_every = repeat_every / scale
        self.w = nn.Parameter(torch.zeros(1) + 0.05)

    def forward(self, t):
        # Exactly 1 on a holiday occurrence and 0 elsewhere, scaled by w.
        on_holiday = torch.remainder(t - self.holiday, self.repeat_every) == 0
        return on_holiday.float() * self.w


# export
class HolidayRange(nn.Module):
    """Indicator feature that fires inside any of a list of [lower, upper] windows."""

    def __init__(self, holidays):
        """
        holidays: list of lists containing lower and upper bound of hols
        """
        super().__init__()
        self.holidays = holidays
        self.w = nn.Parameter(torch.zeros(1) + 0.05)

    def forward(self, t):
        # Number of windows containing each t (0/1 when windows are disjoint),
        # scaled by the learnable weight.
        in_window = sum((lo <= t) & (t <= hi) for lo, hi in self.holidays)
        return in_window.float() * self.w
# -

# hide
from nbdev.export import *
notebook2script()
Holidays.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Importing the required libraries

# +
# %matplotlib inline
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import csv
import nltk
import string
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix, f1_score, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
import spacy
import it_core_news_sm
from nltk.stem.snowball import SnowballStemmer


# +
def generate_wordclouds(X_tfidf, y_test, word_positions):
    """Draw one word cloud per class from the 100 highest total-tf-idf terms.

    X_tfidf: sparse tf-idf matrix; y_test: class label per row;
    word_positions: column index -> term mapping. Returns the set of class ids.
    """
    class_ids = set(y_test)
    top_count = 100
    for class_id in class_ids:
        # compute the total tfidf for each term in the cluster
        tfidf = X_tfidf[y_test == class_id]
        tfidf_sum = np.sum(tfidf, axis=0)  # numpy.matrix
        tfidf_sum = np.asarray(tfidf_sum).reshape(-1)
        top_indices = tfidf_sum.argsort()[-top_count:]
        term_weights = {word_positions[idx]: tfidf_sum[idx] for idx in top_indices}
        wc = WordCloud(width=1200, height=800, background_color="white")
        wordcloud = wc.generate_from_frequencies(term_weights)
        fig, ax = plt.subplots(figsize=(5, 5), dpi=100)
        ax.imshow(wordcloud, interpolation='bilinear')
        ax.axis("off")
        fig.suptitle(f"Class {class_id}")
    return class_ids


class LemmaStemTokenizer(object):
    """Callable tokenizer for Italian text: spaCy lemmatization followed by
    Snowball stemming, with punctuation/stop-POS/number filtering.

    Designed to be passed as `tokenizer=` to TfidfVectorizer.
    """

    def __init__(self):
        self.lemmatizer = it_core_news_sm.load(disable=["parser"])
        self.stemmer = SnowballStemmer("italian")
        #self.lemmatizer = spacy.load("it_core_news_sm",disable=["parser"])

    def __call__(self, document):
        lemmas = []
        rmv_punct = str.maketrans('', '', string.punctuation)
        parts_of_speech = ['DET', 'PRON', 'CONJ', 'SCONJ', 'X', 'SPACE']
        for token in self.lemmatizer(document):
            lemma = token.lemma_
            # search punctation in a string and clean
            if re.search(r'[^\w\s]', lemma) and re.search(r'[\w^\s]', lemma):
                lemma = lemma.translate(rmv_punct)
                if len(lemma) > 2:
                    # re-lemmatize the cleaned surface form
                    token = self.lemmatizer(lemma.translate(rmv_punct))[0]
                    lemma = token.lemma_
            # filter out any tokens not containing significant value (e.g. raw punctuation)
            if lemma not in string.punctuation and \
               len(lemma) > 2 and \
               len(lemma) < 20 and \
               token.pos_ not in parts_of_speech and \
               not re.search(r'\d', lemma):
                lemma = self.stemmer.stem(lemma)
                lemmas.append(lemma)
        return lemmas
# -

# # Reading and Extracting data from .csv files

df_dev = pd.read_csv('development.csv')[['text', 'class']]
df_eval = pd.read_csv('evaluation.csv')

# # Data exploration

# +
# Dataset dimensions
df = pd.concat([df_dev, df_eval], sort=False)
print("Dimensions datasets\n", len(df_dev), len(df_eval), len(df))
# Check if the text column has empty cells
print("\nEmpty text values\n", df["text"].isna().any(axis=0))
# Check if the class column has empty cells
print("\nEmpty class values\n", df_dev["class"].isna().any(axis=0))
# -

# The count of each of the two classes
# 67% / 33% imbalance
fig2 = sns.countplot(x='class', data=df_dev)
plt.title('Label Counts')
plot = fig2.get_figure()
plot.savefig('count_plot.png')

# +
# Average Word Length vs class
df_dev['length'] = df_dev['text'].apply(len)
fig1 = sns.barplot('class', 'length', data=df_dev, palette='PRGn')
plt.title('Average Word Length vs label')
plot = fig1.get_figure()
plot.savefig('barplot_avg_word.png')
# -

# # Feature Engineering

lemmaStemTokenizer = LemmaStemTokenizer()
# Extra Italian stopwords (post-lemmatization fragments).
stopword = ['aver', 'esser', 'all', 'dal', 'dall', 'del', 'nel', 'nell', 'sul', 'sull', 'dell', 'per', 'poi']
vectorizer = TfidfVectorizer(tokenizer=lemmaStemTokenizer,
                             stop_words=stopword,
                             ngram_range=(1, 2),
                             strip_accents="ascii",
                             lowercase=True,
                             max_df=0.25,
                             min_df=3)

# +
# Rows with a class are train/valid; rows without come from evaluation.csv.
train_valid_mask = ~df["class"].isna()
X = df.drop(columns=["class"]).values
y = df["class"].values
X_tfidf = vectorizer.fit_transform(X.ravel())
# len(vectorizer.get_feature_names())
# -

X_train_valid = X_tfidf[train_valid_mask.to_numpy().nonzero()]
y_train_valid = y[train_valid_mask]
X_eval = X_tfidf[(~train_valid_mask).to_numpy().nonzero()]

word_positions = {v: k for k, v in vectorizer.vocabulary_.items()}
generate_wordclouds(X_train_valid, y_train_valid, word_positions)

# # Model Selection and Machine Learning

# +
# Let’s experiment many classifiers.
models = [
    MultinomialNB(),
    BernoulliNB(),
    LogisticRegression(),
    SGDClassifier(),
    LinearSVC(),
    SVC(),
    RandomForestClassifier(),
    MLPClassifier()
]

# Init a dictionary for storing results of each run for each model
results = {
    model.__class__.__name__: {
        'accuracy': [],
        'f1_score': [],
        'confusion_matrix': []
    } for model in models
}

# K-Fold with 5 splits
kfold = KFold(n_splits=5, shuffle=True)
for train_indices, test_indices in kfold.split(X_train_valid, y_train_valid):
    # Prepare splits
    X_train_res = X_train_valid[train_indices]  # Use fancy indexing to extract data
    y_train_res = y_train_valid[train_indices]
    X_test = X_train_valid[test_indices]
    y_test = y_train_valid[test_indices]
    for model in models:
        model.fit(X_train_res, y_train_res)
        y_pred = model.predict(X_test)
        results[model.__class__.__name__]['accuracy'].append(accuracy_score(y_test, y_pred))
        results[model.__class__.__name__]['f1_score'].append(f1_score(y_test, y_pred, average='weighted'))

# +
# Print result
metrics = []
for model, d in results.items():
    metrics.append([model, "acc", (sum(d['accuracy']) / len(d['accuracy']) * 100)])
    metrics.append([model, "f1", (sum(d['f1_score']) / len(d['f1_score']) * 100)])
metrics = np.array(metrics)
metrics = pd.DataFrame({'model': metrics[:, 0], 'metric': metrics[:, 1], 'value': metrics[:, 2]})
metrics['value'] = metrics['value'].astype(float)

plt.figure(figsize=(15, 5))
plt.xlim(75, 100)
plt.grid(color='grey', linestyle='--', linewidth=2)
fig3 = sns.barplot(x="value", y="model", hue="metric", data=metrics)
plt.title('Performance Algorithm')
plot = fig3.get_figure()
plot.savefig('plot_performance.png')

# +
# The best classifier is the Linear Support Vector Classification
X_train, X_test, y_train, y_test = train_test_split(X_train_valid, y_train_valid, test_size=0.2, stratify=y_train_valid)
svc = LinearSVC()
svc.fit(X_train, y_train)
# -

# Validate model
y_test_pred = svc.predict(X_test)

# +
# Precision, recall, f1, support: for each class
acc = accuracy_score(y_test, y_test_pred)
f1 = f1_score(y_test, y_test_pred, average='weighted')
p, _, _, _ = precision_recall_fscore_support(y_test, y_test_pred)
print(f"Accuracy = {acc:.3f}")
print(f"f1_weighted_score = {f1:.3f}")
print(classification_report(y_test_pred, y_test))

# +
# Build the confusion matrix
conf_mat = confusion_matrix(y_test, y_test_pred)
# Plot the result
label_names = np.arange(p.shape[0])
conf_mat_df = pd.DataFrame(conf_mat, index=label_names, columns=label_names)
conf_mat_df.index.name = 'Actual'
conf_mat_df.columns.name = 'Predicted'
fig = sns.heatmap(conf_mat_df, annot=True, cmap='GnBu', annot_kws={"size": 16}, fmt='g', cbar=False)
plt.show()
plot = fig.get_figure()
plot.savefig('confusion_matrix.png')
# -

# # Tuning and Validation

# +
# gridsearch
param_grid = [
    {'C': [1, 1.5, 2, 2.5, 3, 3.5],
     'penalty': ['l1', 'l2'],
     'tol': [1e-01, 1e-02, 1e-03],
     'multi_class': ['ovr'],
     'class_weight': ['balanced', None],
     'random_state': [42]
     }
]
gs = GridSearchCV(LinearSVC(), param_grid, scoring="f1_weighted", n_jobs=-1, cv=5)
gs.fit(X_train_valid, y_train_valid)
y_eval = gs.predict(X_eval)
gs.best_score_, gs.best_params_
# -

# print result
pd.DataFrame(y_eval, index=df[~train_valid_mask].index).to_csv("output.csv", index_label="Id", header=["Predicted"])
code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example # # First you have to start the visdom server # ``` # python -m visdom.server -port 8080 # ``` # and go to `http://localhost:8080/`. # # Then you can run the following code and get a line plot and an image plot. # # After starting the code you have to select `my_environment` in the environment dropdown in your browser. Maybe you have to refresh the page to see it. # ## Plot a value import numpy # + from trixi.logger import NumpyVisdomLogger as Nvl from time import sleep import numpy as np nvl = Nvl(name="my_environment") x = 0 for i in range(50): y = np.sin(x) #Add one value to the plot. Setting x-axis values is currently not supported. nvl.show_value(y, name="sin plot") x += 0.1 sleep(0.1) # - # ## Plot image and text # + from trixi.logger import NumpyVisdomLogger as Nvl from time import sleep import numpy as np from skimage.data import camera, checkerboard, chelsea, astronaut nvl = Nvl(name="my_environment") images = [astronaut(), camera(), checkerboard(), chelsea()] for i in range(50): img = images[i%len(images)] #use modulo to cycle images #color channel has to be in first dimension if len(img.shape) == 3: img = img.transpose((2,0,1)) #move color channel from last to first dimension nvl.show_image(img, name="img plot", title="image title") nvl.show_text("Iteration index: {}".format(i), name="text plot") sleep(0.5) # - # ## Plot image grid # + from trixi.logger import NumpyVisdomLogger as Nvl from time import sleep import numpy as np from skimage.data import camera, checkerboard, chelsea, astronaut, clock, coffee, coins, horse, moon, rocket nvl = Nvl(name="my_environment") #Prepare images images = [astronaut(), camera(), checkerboard(), chelsea(), clock(), coffee(), coins(), horse(), moon(), rocket()] images_converted = [] for img in 
images: img = img[:200,:200] #make all images have same dimensions if len(img.shape) == 3: img = img[:,:,0] #if rgb image use only one channel images_converted.append(img) images_converted = np.array(images_converted) s = images_converted.shape # (10, 200, 200) images_converted = np.reshape(images_converted, (s[0], 1, s[1], s[2])) # images need to have channel dimension in the beginning. New shape: (10, 1, 200, 200) #Show images nvl.show_images(images_converted, name="img grid plot", title="image grid title") # - # # Plot Histogram # + from trixi.logger import NumpyVisdomLogger as Nvl from time import sleep import numpy as np import random nvl = Nvl(name="my_environment") values = [random.gauss(0, 1)] for i in range(500): values.append(random.gauss(0, 1)) nvl.show_histogram(np.array(values), name="histogram plot") sleep(0.1) # - # ## Plot 3D Histogram # + from trixi.logger import NumpyVisdomLogger as Nvl from time import sleep import numpy as np import random nvl = Nvl(name="my_environment") for i in range(5): new_ar = np.random.normal(0, 1, 2000) nvl.show_histogram_3d(new_ar, name="histogram 3D plot") # will multiply values by 100 to make plots better visible sleep(0.1) # -
examples/numpy_visdom_logger_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Pandas cheat sheet: a collection of independent snippets. The dataframes
# they reference (df, data, data3, data5, dff, data_c) are assumed to exist
# in the session — the cells are not meant to run top-to-bottom.

# # Write csv
# http://pandas.pydata.org/pandas-docs/stable/index.html

data3.to_csv('C:\\Trees\\Example_from_pandas_to_ csv_comma.csv', sep=',')

pd.set_option('display.max_columns', 50)

print(df.info())
print(df.columns)
print(df.shape)

df.describe(include=['object', 'bool'])

# # Operations with df

data3.filter(items=['age', 'marital_status'])

data3.filter(like="aaa")

df[df['Churn'] == 1].mean()

pd.crosstab(df['Churn'], df['Voice mail plan'], normalize=True)

df.pivot_table(['Total day calls', 'Total eve calls', 'Total night calls'], ['Area code'], aggfunc='mean').head(10)

df.apply(np.max)

data3.sort_values(by='age', ascending=0)

print(data3['job_position'].unique())

# Categorical columns = all columns minus the numeric ones.
featurescat = list(set(data.columns) - set(data._get_numeric_data().columns))
for i, feat in enumerate(featurescat):
    print(data[feat].unique())

dff.groupby('B').filter(lambda x: len(x['C']) > 2)

# NOTE(review): pandas exposes `_get_numeric_data()` (used above); plain
# `get_numeric_data()` here looks like a typo — confirm.
data_num = data.get_numeric_data()

# Split all the variables into two groups —
# categorical and quantitative
categorical_columns = [c for c in data3.columns if data3[c].dtype.name == 'object']
numerical_columns = [c for c in data3.columns if data3[c].dtype.name != 'object']

data3[categorical_columns].describe()

data3.corr()

df['Area code'].value_counts(normalize=True)

data3.rename(columns={'living_region': 'region'}, inplace=True)

# # Operations with variables

data3['age'].fillna(data3['age'].mean(), inplace=True)

for i in ['credit_count', 'overdue_credit_count']:
    if i in data3.columns:
        data3[i] = data3[i].astype('int64')

# +
# Integer-encode every categorical column in place.
featurescat = list(set(data.columns) - set(data._get_numeric_data().columns))
for i, feat in enumerate(featurescat):
    data_c[feat] = pd.factorize(data_c[feat])[0]
# -

data3.groupby('job')['monthly_income'].mean()

data3.drop('client_id', axis=1, inplace=True)


# Select low-correlation variables: drop any column whose pairwise
# correlation with an earlier column reaches the threshold.
def correlation(dataset, threshold):
    col_corr = set()  # Set of all the names of deleted columns
    corr_matrix = dataset.corr()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            if corr_matrix.iloc[i, j] >= threshold:
                colname = corr_matrix.columns[i]  # getting the name of column
                col_corr.add(colname)
                if colname in dataset.columns:
                    del dataset[colname]  # deleting the column from the dataset
    print(dataset)


# # Data preprocessing

# + active=""
# data5.drop_duplicates()
# -

# Use str.lstrip to remove the unwanted underscore that several values of
# the Клиент (Client) column start with; str.lstrip returns a copy of the
# string with the given characters removed from the beginning (left).
data5['Клиент'] = data5['Клиент'].str.lstrip('_')
data5

# Use str.rstrip to remove the unwanted characters that some values of the
# Возраст (Age) column end with; str.rstrip strips them from the end (right).
data5['Возраст'] = data5['Возраст'].str.rstrip('&лет')
data5

# Drop the last 3 characters of every value of the Регион (Region) column.
data5['Регион'] = data5['Регион'].map(lambda x: str(x)[:-3])

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)

# # Feature eng

df['color'] = [1 if x < 1 else 0 for x in df['A']]

# Frequency-encode living_region.
data3['region_cnt'] = data3['living_region'].map(data3['living_region'].value_counts())
data3.head()


def code_mean(data, cat_feature, real_feature):
    # Mean-target encoding: map each category to the mean of real_feature
    # within that category.
    return (data[cat_feature].map(data.groupby(cat_feature)[real_feature].mean()))


# Create a variable whose values are the mean of monthly_income within each
# living_region category.
data3['region_mean_income'] = code_mean(data3, 'living_region', 'monthly_income')
data3.head()


# Function that creates a variable as the conjunction of feature1 and feature2.
def make_conj(data, feature1, feature2):
    data[feature1 + ' + ' + feature2] = data[feature1].astype(str) + ' + ' + data[feature2].astype(str)
    return (data)


make_conj(data3, 'education', 'marital_status')
data3.head()

data3['log_income'] = np.log(data3['monthly_income'])

data3['mean_age_tenure'] = data3[['age', 'credit_month']].mean(axis=1)

data3['income_decile'] = pd.qcut(data3['monthly_income'], 10, labels=False)

# Create a new variable retired: «Yes» when age is above 60, «No» otherwise.
data3['retired'] = np.where(data3['age'] >= 60, 'Yes', 'No')
Pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example
# Pandas tutorial: load an open-data JSON feed, explore it, and derive a
# numeric column from a categorical one.

import pandas as pd

# Read a remote JSON dataset straight into a DataFrame.
df = pd.read_json("https://data.smcgov.org/resource/mb6a-xn89.json")

# Row/column counts, column names, dtypes, memory usage — a first overview.
df.info()

# First five rows.
df.head(5)

# (rows, columns)
df.shape

# Column names and dtypes individually (already shown by info(); kept for reference).
df.columns

df.dtypes

# Summary statistics (count, mean, std, min, max, ...) of the numeric columns.
df.describe()

# Select specific rows and columns by label.
df.loc[[1, 2, 3], ["geography_type", "year", "bachelor_s_degree_or_higher"]]

# Group by geography type and average the numeric columns.
df.groupby("geography_type").mean()

# Rows with high-school-graduate share above 10 and
# bachelor-or-higher share below 8.7.
hs_share_high = df["high_school_graduate"] > 10.00
ba_share_low = df["bachelor_s_degree_or_higher"] < 8.7
print(df[hs_share_high & ba_share_low])


def Neresi(yer):
    """Encode a geography type as a number: City -> 1, CDP -> 2, other -> 0."""
    return {"City": 1, "CDP": 2}.get(yer, 0)


# Apply the encoder and store the result in a new column.
df['nerede_yasiyor'] = df.geography_type.apply(Neresi)

# 15 records live in a City, 5 in a CDP.
df.nerede_yasiyor.value_counts()

# Pandas plotting is built on matplotlib.
import matplotlib.pyplot as plt

df.less_than_high_school_graduate.plot(style="r.-", title="Lise Mezunundan Daha Az")
BasicExample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:covid_nlp]
#     language: python
#     name: conda-env-covid_nlp-py
# ---

# If you haven't yet, start by setting up your environment and datasets by following the instructions in the README. It should be something like:
# * `make create_environment`
# * `conda activate covid_nlp`
# * `make update_environment`
# * `make data`
#
# Several common packages that you may want to use (e.g. UMAP, HDBSCAN, enstop, sklearn) have already been added to the `covid_nlp` environment via `environment.yml`. To add more, edit that file and do a:
# `make update_environment`

# Quick cell to make jupyter notebook use the full screen width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))

# Automatically pick up code changes in the `src` module
# %load_ext autoreload
# %autoreload 2

import json
import pandas as pd

# Useful imports from easydata
from src import paths
from src.data import Dataset
from src import workflow

# ## Load up the dataset
#
# The metadata has been augmented with where the files can be found relative to `paths["interim_data_path"]`

# +
#paths['interim_data_path']
# -

workflow.available_datasets()

# If the previous cell returned an empty list, go back and re-run `make data` as described at the top of this notebook.
ds_name = 'covid_nlp_20200319'

# Load the dataset
meta_ds = Dataset.load(ds_name)
print(meta_ds.DESCR[:457])

# The processed dataframe is the `data` method of this data source
meta_df = meta_ds.data
meta_df.head()

# ## Basics on the dataset
#
# The JSON files given in the `path` column of the metadata dataframe are the papers in `json` format (as dicts)
# that include the following keys:
# * `paper_id`
# * `metadata`
# * `abstract`
# * `body_text`
# * `bib_entries`
# * `ref_entries`
# * `back_matter`
#
# where the `paper_id` is the sha hash from the medadata.
#
# For example:

# NOTE(review): the file handle opened here is never closed — consider a
# `with open(...)` block.
filename = paths['interim_data_path'] / ds_name / meta_df['path'][0]
file = json.load(open(filename, 'rb'))
file.keys()

# # Pre-processing data for various embeddings

# ### Example 1: If you want to start with abstracts...
# e.g. if you want to reproduce the analysis in
# https://gitlab.com/ar2a/covid19-kaggle/-/blob/master/notebooks/gpclend_embed_abstracts.ipynb (you will be able to pick up this notebook from **Point ranking (will be used later)**) then do this:

abstracts = meta_df.abstract.dropna()

abstracts[:5]

len(abstracts)

# ## Example 2: If you want to split up documents by their sections...
#
# If you want to produce similar analyses to:
# * https://gitlab.com/ar2a/covid19-kaggle/-/blob/master/notebooks/mpfrane-scispacy-tokenization.ipynb (the processing below will take care of everything up to: **Apply scispacy tokenization**)
# * https://gitlab.com/ar2a/covid19-kaggle/-/blob/master/notebooks/top2vec_corona_dangel.ipynb (the processing below will take care of everything up to: **Train Top2Vec Model**)
#
# i.e. turn each section into its own row, and treat sections as their own documents for the purposes of embedding.
#
# Here we've written a custom processing function (a _data transformer_) called `create_section_df` that will take in the current dataset and produce a new, transformed dataset.

from src.data.localdata import create_section_df

help(create_section_df)

# filter however you like based on the metadata. We'll just demo with the first 100 entries
df = meta_df[:100]

# %%time
parsed_df = create_section_df(df)

parsed_df.head()
notebooks/01-Start-Your-Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import io_mesh as io import subprocess import matplotlib.pyplot as plt # %matplotlib inline # + def calculate_area(surfname,fwhm): """calculate surface area using minctools""" try: subprocess.call("depth_potential -area_voronoi " + surfname + " /tmp/tmp_area.txt",shell=True) subprocess.call("depth_potential -smooth " + str(fwhm) + " /tmp/tmp_area.txt " + surfname + " /tmp/sm_area.txt",shell=True) area=np.loadtxt("/tmp/sm_area.txt") subprocess.call("rm /tmp/sm_area.txt /tmp/tmp_area.txt",shell=True) except OSError: print("depth_potential not found, please install CIVET tools or replace with alternative area calculation/data smoothing") return 0; return area; def beta(alpha, aw, ap): """Compute euclidean distance fraction, beta, that will yield the desired volume fraction, alpha, given vertex areas in the white matter surface, aw, and on the pial surface, ap. A surface with `alpha` fraction of the cortical volume below it and `1 - alpha` fraction above it can then be constructed from pial, px, and white matter, pw, surface coordinates as `beta * px + (1 - beta) * pw`. """ if alpha == 0: return np.zeros_like(aw) elif alpha == 1: return np.ones_like(aw) else: return 1-(1 / (ap - aw) * (-aw + np.sqrt((1-alpha)*ap**2 + alpha*aw**2))) def rho(alpha, aw, ap): """Compute equivolumetric distance fraction, rho from the euclidean depth alpha, given vertex areas in the white matter surface, aw, and on the pial surface, ap. A surface with `alpha` fraction of the cortical volume below it and `1 - alpha` fraction above it can then be constructed from pial, px, and white matter, pw, surface coordinates as `beta * px + (1 - beta) * pw`. 
""" return (-aw + np.sqrt(alpha * ap**2 + (1-alpha)*aw**2))/(ap-aw) # + #import surfaces fwhm = 2 hemi="left" prefix="smMarch_surfs_" NNDir="/data1/users/kwagstyl/bigbrain/NeuralNetworks/Surfaces200218/" white_surf = NNDir + prefix + hemi + "_layer5.obj" gray_surf = NNDir + prefix + hemi + "_layer0.obj" wm = io.load_mesh_geometry(white_surf) gm = io.load_mesh_geometry(gray_surf) wm_vertexareas = calculate_area(white_surf, fwhm) pia_vertexareas = calculate_area(gray_surf, fwhm) # - #import thickness maps Thickness=np.zeros((6,len(wm['coords']))) for n in range(6): Thickness[n,:]=np.loadtxt('thickness_'+hemi+'_layer'+str(n+1)+'.txt') Total=np.sum(Thickness,axis=0) Fractional=Thickness/Total # + equi_depth=np.zeros((6,len(wm['coords']))) fractional_depth=np.zeros((6,len(wm['coords']))) equi_thickness=np.zeros((6,len(wm['coords']))) for n in range(6): fractional_depth[n]=np.sum(Fractional[:n+1,:],axis=0) equi_depth[n,:]=rho(fractional_depth[n],wm_vertexareas,pia_vertexareas) if n>0: equi_thickness[n,:] = equi_depth[n,:] - equi_depth[n-1,:] else: equi_thickness[n,:] = equi_depth[n,:] equi_thickness=np.nan_to_num(equi_thickness) np.savetxt('equi_thickness'+hemi+'_'+str(n+1)+'.txt', equi_thickness[n,:],fmt='%.4f') subprocess.call("depth_potential -smooth " + str(fwhm) + " equi_thickness"+hemi+"_"+str(n+1)+".txt " + white_surf + " smequi_thickness"+hemi+"_"+str(n+1)+".txt",shell=True) # - ratio=np.sum(equi_thickness[1:3],axis=0)/np.sum(equi_thickness[4:],axis=0) np.isinf(ratio).any() ratio[np.where(np.isnan(ratio))[0]]=0 ratio[np.where(np.isinf(ratio))[0]]=5 ratio=np.nan_to_num(ratio) np.savetxt('ratio'+hemi+'.txt', ratio,fmt='%.4f') subprocess.call("depth_potential -smooth " + str(5) + " ratio"+hemi+".txt " + white_surf + " smratio"+hemi+".txt",shell=True) fractional_depth[:,0] equi_depth[3,0] ratio[np.where(ratio>5)[0]]=5 plt.scatter(wm['coords'][:,1],ratio,alpha=0.1)
scripts/notebooks/Equivolumetric_thicknesses.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 4 - Evaluation and Optimization # %pylab inline import pandas as pandas # We generate two inputs: # * features – a matrix of input features # * target – an array of target variables corresponding to those features #uso 5 CATEGORIE BOOLEANE A CASO #e uso numeri random come target features = rand(100,5) target = rand(100) > 0.5 # ### The holdout method # # We divide into a randomized training and test set: int(floor(0.7*100)) # + N = features.shape[0] N_train = int(floor(0.7 * N)) # Randomize index # Note: sometimes you want to retain the order in the dataset and skip this step # E.g. in the case of time-based datasets where you want to test on 'later' instances idx = random.permutation(N) # Split index idx_train = idx[:N_train] idx_test = idx[N_train:] # Break your data into training and testing subsets features_train = features[idx_train,:] target_train = target[idx_train] features_test = features[idx_test,:] target_test = target[idx_test] # Build, predict, evaluate (to be filled out) # model = train(features_train, target_train) # preds_test = predict(model, features_test) # accuracy = evaluate_acc(preds_test, target_test) # - print(features_train.shape) print(features_test.shape) print(target_train.shape) print(target_test.shape) # ### K-fold cross-validation # + N = features.shape[0] K = 10 # number of folds #ad ogni riga di dati associa, a caso, un'etichetta di FOLD così non separa #i dati in modo contiguo ma randomico creando 10 "fold" cioè dieci cluster #usando il randint non avrò fold omogenei nel senso 10 fold da 10 #ma avrò 10 fold ognuno con 8, 13, 10, 6 elementi, etc.... 
preds_kfold = np.empty(N) folds = np.random.randint(0, K, size=N) print(folds) #ciclo tutti i fold, butto i (k-1) nel training e il k-simo per il test for idx in np.arange(K): # For each fold, break your data into training and testing subsets features_train = features[folds != idx,:] target_train = target[folds != idx] features_test = features[folds == idx,:] # Print the indices in each fold, for inspection print("Positions of "+str(idx)+" in fold array: ", end="") print(nonzero(folds == idx)[0]) # Build and predict for CV fold (to be filled out) # model = train(features_train, target_train) # preds_kfold[folds == idx] = predict(model, features_test) # accuracy = evaluate_acc(preds_kfold, target) # - # ### The ROC curve def roc_curve(true_labels, predicted_probs, n_points=100, pos_class=1): thr = linspace(0,1,n_points) tpr = zeros(n_points) fpr = zeros(n_points) pos = true_labels == pos_class neg = logical_not(pos) n_pos = count_nonzero(pos) n_neg = count_nonzero(neg) for i,t in enumerate(thr): tpr[i] = count_nonzero(logical_and(predicted_probs >= t, pos)) / n_pos fpr[i] = count_nonzero(logical_and(predicted_probs >= t, neg)) / n_neg return fpr, tpr, thr # Randomly generated predictions should give us a diagonal ROC curve preds = rand(len(target)) fpr, tpr, thr = roc_curve(target, preds, pos_class=True) plot(fpr, tpr) # ### The area under the ROC curve def auc(true_labels, predicted_labels, pos_class=1): fpr, tpr, thr = roc_curve(true_labels, predicted_labels, pos_class=pos_class) area = -trapz(tpr, x=fpr) return area auc(target, preds, pos_class=True) # ### Multi-class classification d = pandas.read_csv("data/mnist_small.csv") d_train = d[:int(0.8*len(d))] d_test = d[int(0.8*len(d)):] from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier() rf.fit(d_train.drop('label', axis=1), d_train['label']) from sklearn.metrics import confusion_matrix preds = rf.predict(d_test.drop('label', axis=1)) cm = confusion_matrix(d_test['label'], preds) 
matshow(cm, cmap='Greys') colorbar() savefig("figures/figure-4.19.eps", format='eps') # ### The root-mean-square error def rmse(true_values, predicted_values): n = len(true_values) residuals = 0 for i in range(n): residuals += (true_values[i] - predicted_values[i])**2. return np.sqrt(residuals/n) rmse(rand(10), rand(10)) # ### The R-squared error def r2(true_values, predicted_values): n = len(true_values) mean = np.mean(true_values) residuals = 0 total = 0 for i in range(n): residuals += (true_values[i] - predicted_values[i])**2. total += (true_values[i] - mean)**2. return 1.0 - residuals/total r2(arange(10)+rand(), arange(10)+rand(10)) # ### Grid search with kernel-SVM model # # Importing modules: #per calcolare AUC uso tool di scikit from sklearn.metrics import roc_auc_score from sklearn.svm import SVC # Loading data and performang poor-mans feature engineering: # + d = pandas.read_csv("data/titanic.csv") # Target y = d["Survived"] # Selezione e puulizia delle features, fatto al volo con funzioni lambda X = d.drop(["Survived", "PassengerId", "Cabin","Ticket","Name", "Fare"], axis=1) X['Sex'] = list(map(lambda x: 1 if x=="male" else 0, X['Sex'])) X['Embarked-Q'] = list(map(lambda x: 1 if x=="Q" else 0, X['Embarked'])) X['Embarked-C'] = list(map(lambda x: 1 if x=="C" else 0, X['Embarked'])) X['Embarked-S'] = list(map(lambda x: 1 if x=="S" else 0, X['Embarked'])) #X = X.drop(["Embarked", "Sex"], axis=1) X = X.drop(["Embarked"], axis=1) X = X.fillna(-1) # - # Performing grid-search to find the optimal hyper-parameters: # + # grid of (gamma, C) values to try gam_vec, cost_vec = np.meshgrid(np.logspace(0.01, 0.1, 11), np.linspace(1, 5, 10)) AUC_all = [] # initialize empty array to store AUC results # set up cross-validation folds N = len(y) K = 10 # number of cross-validation folds folds = np.random.randint(0, K, size=N) # search over every value of the grid #il comando 'ravel' linearizza la matrice for param_ind in np.arange(len(gam_vec.ravel())): # initialize 
cross-validation predictions y_cv_pred = np.empty(N) # loop through the cross-validation folds for ii in np.arange(K): # break your data into training and testing subsets # X_train = X.ix[folds != ii,:] # y_train = y.ix[folds != ii] # X_test = X.ix[folds == ii,:] X_train = X.iloc[folds != ii,:] y_train = y.iloc[folds != ii] X_test = X.iloc[folds == ii,:] #X_train = X.iloc[folds, :] #X_train = X_train.drop(ii) #y_train = y.iloc[folds] #y_train = y.drop(ii) #X_test = X.iloc[folds, :] #X_test = X_test[folds == ii] # build a model on the training set model = SVC(gamma=gam_vec.ravel()[param_ind], C=cost_vec.ravel()[param_ind]) model.fit(X_train, y_train) # generate and store model predictions on the testing set y_cv_pred[folds == ii] = model.predict(X_test) # evaluate the AUC of the predictions AUC_all.append(roc_auc_score(y, y_cv_pred)) indmax = np.argmax(AUC_all) print("Maximum = %.3f" % (np.max(AUC_all))) print("Tuning Parameters: (gamma = %.2f, C = %.2f)" % (gam_vec.ravel()[indmax], cost_vec.ravel()[indmax])) # - ix=2 print(folds) # Train subset taking all rows except the ones with index == to the positions of ix in the folds array X_train = X.iloc[folds!=ix,:] print(X_train.head(20)) X_test = X.iloc[folds==ix,:] print(X_test.head(20)) # Plotting the contours of the parameter performance: # + AUC_grid = np.array(AUC_all).reshape(gam_vec.shape) contourf(gam_vec, cost_vec, AUC_grid, 20, cmap='Greys') xlabel("kernel coefficient, gamma") ylabel("penalty parameter, C") colorbar() savefig("figures/figure-4.25.eps", format='eps') # -
Chapter 4 - Evaluation and Optimization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # -*- coding: utf-8 -*- from __future__ import unicode_literals # text in Western (Windows 1252) import matplotlib.pyplot as plt import numpy as np import sklearn import sklearn.datasets import matplotlib import theano import theano.tensor as T import pydot from IPython.display import Image from IPython.display import SVG import timeit # Display plots inline and change default figure size # %matplotlib inline matplotlib.rcParams['figure.figsize'] = (10.0, 8.0) # + import h5py file_name = 'data_theano.h5' def save_model(model): # a = np.random.random(size=(100,20)) # h5f.close() h5f = h5py.File(file_name, 'w') adict=dict(W1=model['W1'], b1=model['b1'], W2=model['W2'], b2=model['b2']) for k,v in adict.items(): h5f.create_dataset(k,data=v) # h5f.create_dataset('dataset_1', data=model) h5f.close() def load_model(): h5f = h5py.File(file_name,'r') model = {} W1.set_value(h5f['W1'][:]) b1.set_value(h5f['b1'][:]) W2.set_value(h5f['W2'][:]) b2.set_value(h5f['b2'][:]) # model['W1'] = h5f['W1'][:] # model['b1'] = h5f['b1'][:] # model['W2'] = h5f['W2'][:] # model['b2'] = h5f['b2'][:] h5f.close() return model # - with open('../../data/SlovarIJS_BESEDE_utf8.lex') as f: content = f.readlines() content = [x.decode('utf8').split('\t') for x in content] # + # CREATE dictionary AND max_word accetuated_vowels = [u'à', u'á', u'ä', u'é', u'ë', u'ì', u'í', u'î', u'ó', u'ô', u'ö', u'ú', u'ü'] default_vowels = [u'a', u'e', u'i', u'o', u'u'] vowels = [] vowels.extend(accetuated_vowels) vowels.extend(default_vowels) def is_vowel(word_list, position): if word_list[position] in vowels: return True if word_list[position] == u'r' and \ (position - 1 < 0 or word_list[position - 1] not in vowels) and \ (position + 1 >= len(word_list) or word_list[position + 1] not in vowels): 
return True return False dictionary = [''] line = 0 max_word = 0 # ADD 'EMPTY' VOWEL max_num_vowels = 0 for el in content: num_vowels = 0 i = 0 try: if len(el[3]) > max_word: max_word = len(el[3]) # print line if len(el[0]) > max_word: max_word = len(el[0]) print line for c in list(el[3]): if is_vowel(list(el[3]), i): num_vowels += 1 if c not in dictionary: dictionary.append(c) i += 1 for c in list(el[0]): if c not in dictionary: dictionary.append(c) if num_vowels > max_num_vowels: max_num_vowels = num_vowels # print line except Exception, e: print line - 1 print el line += 1 dictionary = sorted(dictionary) max_num_vowels += 1 # for e in dictionary: # print e # + # GENERATE X and y def generate_presentable_y(accetuations_list, word_list): # print accetuations_list # print word_list while len(accetuations_list) < 2: accetuations_list.append(0) if len(accetuations_list) > 2: accetuations_list = accetuations_list[:2] # empty has to be positive accetuations_list = np.array(accetuations_list) # accetuations_list += 1 final_position = accetuations_list[0] + max_num_vowels * accetuations_list[1] return final_position X = np.zeros((len(content), max_word*len(dictionary))) y = np.zeros((len(content), max_num_vowels * max_num_vowels )) i = 0 for el in content: j = 0 for c in list(el[0]): index = 0 for d in dictionary: if c == d: X[i][index + j * max_word] = 1 break index += 1 j += 1 j = 0 word_accetuations = [] num_vowels = 0 for c in list(el[3]): index = 0 # in_dict = False if is_vowel(el[3], j): num_vowels += 1 for d in accetuated_vowels: # print c # print d if c == d: # print 'HERE!' word_accetuations.append(num_vowels) # y[j * len(dictionary) + index][i] = 1 # in_dict = True break index += 1 # if not in_dict: # print 'NOOOOO!!!' 
# print el # print i j += 1 # print len(y.T[i]) # print sum(y.T[i]) # y = y.T y[i][generate_presentable_y(word_accetuations, list(el[3]))] = 1 # print '---------------------------------' # for a in y[i]: # print a # print sum(y[i]) # print y[:][i] # if i > 100: # break i += 1 # + def decode_position(y): max_el = 0 i = 0 pos = -1 # pos = y.argmax(axis=0) for el in y: # print el if el > max_el: max_el = el # print 'HERE!!!' # print pos pos = i i += 1 return [pos % max_num_vowels, pos / max_num_vowels] decode_position(y[157303]) # decode_position(y[157241]) # print y[2] # + X = X[:100000] y = y[:100000] def unison_shuffled_copies(a, b): assert len(a) == len(b) p = np.random.permutation(len(a)) return a[p], b[p] X, y = unison_shuffled_copies(X, y) # print X.shape train_X = X.astype(np.float32) train_y = y.astype(np.int32) num_examples = len(X) # training set size nn_input_dim = max_word * len(dictionary) # input layer dimensionality nn_output_dim = max_num_vowels * max_num_vowels # output layer dimensionality nn_hdim = 500 # Gradient descent parameters (I picked these by hand) epsilon = 1 # learning rate for gradient descent reg_lambda = 1 # regularization strength # + train_y.shape # Our data vectors X = T.matrix('X') # matrix of doubles y = T.lmatrix('y') # vector of doubles # Shared variables with initial values. We need to learn these. W1 = theano.shared(np.random.randn(nn_input_dim, nn_hdim), name='W1') b1 = theano.shared(np.zeros(nn_hdim), name='b1') W2 = theano.shared(np.random.randn(nn_hdim, nn_output_dim), name='W2') b2 = theano.shared(np.zeros(nn_output_dim), name='b2') # + # Forward propagation # Note: We are just defining the expressions, nothing is evaluated here! 
z1 = X.dot(W1) + b1 a1 = T.tanh(z1) z2 = a1.dot(W2) + b2 y_hat = T.nnet.softmax(z2) # output probabilties # The regularization term (optional) loss_reg = 1./num_examples * reg_lambda/2 * (T.sum(T.sqr(W1)) + T.sum(T.sqr(W2))) # the loss function we want to optimize loss = T.nnet.categorical_crossentropy(y_hat, y).mean() + loss_reg # loss = T.nnet.binary_crossentropy(y_hat, y).mean() + loss_reg # Returns a class prediction prediction = T.argmax(y_hat, axis=1) # + # Theano functions that can be called from our Python code forward_prop = theano.function([X], y_hat) calculate_loss = theano.function([X, y], loss) predict = theano.function([X], prediction) # Example call: Forward Propagation # forward_prop([[1,2]]) # - dW2 = T.grad(loss, W2) db2 = T.grad(loss, b2) dW1 = T.grad(loss, W1) db1 = T.grad(loss, b1) gradient_step = theano.function( [X, y], updates=((W2, W2 - epsilon * dW2), (W1, W1 - epsilon * dW1), (b2, b2 - epsilon * db2), (b1, b1 - epsilon * db1))) # This function learns parameters for the neural network and returns the model. # - num_passes: Number of passes through the training data for gradient descent # - print_loss: If True, print the loss every 1000 iterations def build_model(num_passes=125, print_loss=False): # Re-Initialize the parameters to random values. We need to learn these. # (Needed in case we call this function multiple times) np.random.seed(0) W1.set_value(np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)) b1.set_value(np.zeros(nn_hdim)) W2.set_value(np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)) b2.set_value(np.zeros(nn_output_dim)) # Gradient descent. For each batch... for i in xrange(0, num_passes): # print type(W1) save_model({'W1': W1.get_value(borrow=True), 'b1': b1.get_value(borrow=True), 'W2': W2.get_value(borrow=True), 'b2': b2.get_value(borrow=True)}) # This will update our parameters W2, b2, W1 and b1! print 'NUM: ' + str(i) gradient_step(train_X, train_y) # Optionally print the loss. 
# This is expensive because it uses the whole dataset, so we don't want to do it too often. if print_loss and i % 25 == 0: print "Loss after iteration %i: %f" %(i, calculate_loss(train_X, train_y)) print type(train_y[0][0]) print train_X[:3] print train_X.shape print train_y.shape print train_y[:3] # + # Build a model with a 3-dimensional hidden layer build_model(print_loss=True) # Plot the decision boundary plot_decision_boundary(lambda x: predict(x)) plt.title("Decision Boundary for hidden layer size 3") # - predict([generate_input_from_word('testiram')])[0] # + def generate_input_from_word(word): x = np.zeros(max_word*len(dictionary)) j = 0 for c in list(word): index = 0 for d in dictionary: if c == d: x[index + j * max_word] = 1 break index += 1 j += 1 return x model = load_model() prediction = predict(model, generate_input_from_word('hidrija')) print decode_position(prediction[0])
cnn/character_based_ffnn_theano.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,py:light # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ERA5 monthly averaged data on single levels from 1985 to 2014 # ERA5 form: https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels?tab=form # # ERA5 variables: https://confluence.ecmwf.int/display/CKB/ERA5%3A+data+documentation import cdsapi import glob path = '/home/franzihe/nird_NS9600K/franzihe/data/ERA5/3_hourly/' form = 'netcdf' # 'grib' available_month = {'01':['01'], '02':['02'], '03':['03'], '04':['04'], '05':['05'], '06':['06'], '07':['07'], '08':['08'], '09':['09'], '10':['10'], '11':['11'], '12':['12'] } available_years = { "08": ['2008',], } variable = {'tp' :'total_precipitation', 'sf' :'snowfall', } counter = 0 for keys, var in variable.items(): for years in available_years: for months in available_month: if form == 'netcdf': _ff = 'nc' elif form == 'grib': _ff = 'grib' filename = '{keys}_3hourly_ERA5_{starty}{startm}.{format}'.format(keys = keys, starty = available_years[years][0], startm = available_month[months][0], format = _ff) files = glob.glob(path+filename) if path + filename in files: print(path + filename + ' is downloaded') counter += 1 print("Have downloaded in total : " + str(counter) + " files") else: c = cdsapi.Client() c.retrieve( 'reanalysis-era5-single-levels', { 'product_type': 'reanalysis', 'format': form, 'variable': var, 'year': available_years[years], 'month': available_month[months], 'day': [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', ], 'time': [ '00:00', '03:00', '06:00', '09:00', '12:00', '15:00', '18:00', '21:00', ], }, path + filename) print('File saved: {}'.format(path + filename))
download_hourly_single_level.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import boto3 import botocore from botocore.exceptions import ClientError # + # List existing S3 Buckets def list_buckets(): s3 = boto3.client('s3') response = s3.list_buckets() # Output the bucket names print('Existing buckets:') for bucket in response['Buckets']: print(f' {bucket["Name"]}') list_buckets() # + # Create an S3 Bucket def create_bucket(bucketname): s3 = boto3.client('s3') try: s3.create_bucket(Bucket=bucketname) except ClientError as e: print(e) return "Something wrong happened" return "S3 Bucket has been created" create_bucket('') # - # Upload to S3 Bucket def upload_s3(localfilename, bucketname, s3filename): s3 = boto3.client('s3') s3.upload_file(filelocation, bucketname, s3filename) return "File has been uploaded to S3 Bucket" # Download from an S3 Bucket def download_s3(s3filename, bucketname, localfilename): s3 = boto3.resource('s3') try: s3.Bucket(bucketname).download_file(s3filename, localfilename) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": print("The object does not exist.") else: raise
S3 Bucket.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![rmotr](https://user-images.githubusercontent.com/7065401/52071918-bda15380-2562-11e9-828c-7f95297e4a82.png) # <hr style="margin-bottom: 40px;"> # # # Pandas Series exercises # # + # Import the numpy package under the name np import numpy as np # Import the pandas package under the name pd import pandas as pd # Print the pandas version and the configuration print(pd.__version__) # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Series creation # ### Create an empty pandas Series # your code goes here # + [solution] pd.Series() # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X python list convert it to an Y pandas Series # your code goes here # + [solution] X = ['A','B','C'] print(X, type(X)) Y = pd.Series(X) print(Y, type(Y)) # different type # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, name it 'My letters' # your code goes here # + [solution] X = pd.Series(['A','B','C']) X.name = 'My letters' X # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show its values # # your code goes here # + [solution] X = pd.Series(['A','B','C']) X.values # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Series indexation # ### Assign index names to the given X pandas Series # # your code goes here # + [solution] X = pd.Series(['A','B','C']) index_names = ['first', 'second', 'third'] X.index = 
index_names X # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show its first element # # your code goes here # + [solution] X = pd.Series(['A','B','C'], index=['first', 'second', 'third']) #X[0] # by position #X.iloc[0] # by position X['first'] # by index # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show its last element # # your code goes here # + [solution] X = pd.Series(['A','B','C'], index=['first', 'second', 'third']) #X[-1] # by position #X.iloc[-1] # by position X['third'] # by index # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show all middle elements # # your code goes here # + [solution] X = pd.Series(['A','B','C','D','E'], index=['first','second','third','forth','fifth']) #X[['second', 'third', 'forth']] #X.iloc[1:-1] # by position X[1:-1] # by position # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show the elements in reverse position # # your code goes here # + [solution] X = pd.Series(['A','B','C','D','E'], index=['first','second','third','forth','fifth']) #X.iloc[::-1] X[::-1] # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show the first and last elements # # your code goes here # + [solution] X = pd.Series(['A','B','C','D','E'], index=['first','second','third','forth','fifth']) #X[['first', 'fifth']] #X.iloc[[0, -1]] X[[0, -1]] # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Series manipulation # ### Convert the 
given integer pandas Series to float # # your code goes here # + [solution] X = pd.Series([1,2,3,4,5], index=['first','second','third','forth','fifth']) pd.Series(X, dtype=np.float) # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Reverse the given pandas Series (first element becomes last) # your code goes here # + [solution] X = pd.Series([1,2,3,4,5], index=['first','second','third','forth','fifth']) X[::-1] # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Order (sort) the given pandas Series # # your code goes here # + [solution] X = pd.Series([4,2,5,1,3], index=['forth','second','fifth','first','third']) X = X.sort_values() X # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, set the fifth element equal to 10 # # your code goes here # + [solution] X = pd.Series([1,2,3,4,5], index=['A','B','C','D','E']) X[4] = 10 X # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, change all the middle elements to 0 # # your code goes here # + [solution] X = pd.Series([1,2,3,4,5], index=['A','B','C','D','E']) X[1:-1] = 0 X # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, add 5 to every element # # your code goes here # + [solution] X = pd.Series([1,2,3,4,5]) X + 5 # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Series boolean arrays (also called masks) # ### Given the X pandas Series, make a mask showing negative elements # # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) mask = X <= 
0 mask # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, get the negative elements # # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) mask = X <= 0 X[mask] # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, get numbers higher than 5 # # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) mask = X > 5 X[mask] # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, get numbers higher than the elements mean # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) mask = X > X.mean() X[mask] # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, get numbers equal to 2 or 10 # # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) mask = (X == 2) | (X == 10) X[mask] # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Logic functions # ### Given the X pandas Series, return True if none of its elements is zero # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) X.all() # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, return True if any of its elements is zero # # your code goes here # + [solution] X = pd.Series([-1,2,0,-4,5,6,0,0,-9,10]) X.any() # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Summary statistics # ### Given the X pandas Series, show the sum of its 
elements # # your code goes here # + [solution] X = pd.Series([3,5,6,7,2,3,4,9,4]) #np.sum(X) X.sum() # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show the mean value of its elements # your code goes here # + [solution] X = pd.Series([1,2,0,4,5,6,0,0,9,10]) #np.mean(X) X.mean() # - # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ### Given the X pandas Series, show the max value of its elements # your code goes here # + [solution] X = pd.Series([1,2,0,4,5,6,0,0,9,10]) #np.max(X) X.max() # - # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png)
Pandas/2 - Pandas Series exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CS207 Project Group 11 - "The Differentiators" # # Milestone 2 # # ***** # ## Introduction # # Derivatives are ubiquitous in many fields such as engineering design optimization, fluid dynamics and machine learning. # There are in general three ways to calculate the derivatives: automatic differentiation, numeric differentiation, and # symbolic differentiation. Automatic Differentiation (AD) brings a family of techniques that can calculate the partial # derivatives of any function at any point efficiently and accurately. Unlike numeric differentiation, AD does not have # the problem of floating point precision errors, since it calculates the derivative of a simple function, and keeps track of # these derivatives, and there is no need of step sizes. Compared to symbolic differentiation, AD is not as memory intense, # and can be much faster in terms of the calculation. Therefore, AD is an important way to calculate derivatives in # practice. # # The software that we design calculates the derivatives given the user’s input using the forward mode of automatic differentiation, # and provides the user with an easy way to solve their optimization problem using derivatives. # # ## Background # # At the core of Automatic Differentiation is the principle that functions implemented as computer code can be broken down into elementary functions, ranging from arithmetic operations (e.g. addition, subtraction etc.) and other functions (e.g. power, exponential, sin etc.). Hence, any differentiable function can be interpreted as a composition of different functions. 
# # For example, given a function, $f = sin^2(2x)$, it can be rewritten as: # # $$ f = \phi_1(\phi_2(\phi_3(x))) $$ # # where $$ \phi_1(z) = z^2, \phi_2(y) = sin(y) \text{ and } \phi_3(x) = 2x$$ # # # In the forward mode, the chain rule can then be applied successively to each elementary component function to obtain the derivative of the function. Using the same example above, let $c$ be a real number: # $$ f'(c) = \phi_3'(\phi_2(\phi_1(c))) \cdot \phi_2'(\phi_1(c)) \cdot \phi_1'(c)$$ # # Based on the example above, the derivative, $f'(c)$, can be evaluated based on the following function-derivative pairs at each stage of computing the function: # # $$(\phi_1(c), \phi_1'(c))$$ # # $$(\phi_2(\phi_1(c)), (\phi_2'(\phi_1(c)) \cdot \phi_1'(c)))$$ # # $$(\phi_3(\phi_2(\phi_1(c))), \phi_3'(\phi_2(\phi_1(c)) \cdot \phi_2'(\phi_1(c)) \cdot \phi_1'(c))$$ # # Effectively, the forward mode computes the Jacobian-vector product, $Jp$. This decomposition can be represented via a computational graph structure of calculations, requiring initial values to be set for $x_1$, and $x'_1$: # # $$x_1 \rightarrow^{\phi_3(x)} x_2 \rightarrow^{\phi_2(x)} x_3 \rightarrow^{\phi_1(x)} y $$ # # where $$ \phi_1(x) = x^2, \phi_2(x) = sin(x) \text{ and } \phi_3(x) = 2x$$ # # At each stage of the function, the derivative of the function with respect to its argument is calculated. The exact values of the function and its derivative are used for the following function-derivative pair of values. An example of the computational trace for the equation $f = sin^2(2x)$ would look like this, for $x = \dfrac{\pi}{6}$. 
# # | Trace | Elementary Operation &nbsp;&nbsp;&nbsp;| Derivative &nbsp;&nbsp;&nbsp; | $\left(f\left(a\right), f^{\prime}\left(a\right)\right)$&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| # | :------: | :----------------------: | :------------------------------: | :------------------------------: | # | $x_{1}$ | $\dfrac{\pi}{6}$ | $1$ | $\left(\dfrac{\pi}{6}, 1\right)$ | # | $x_{2}$ | $2x_{1}$ | $2\dot{x}_{1}$ | $\left(\dfrac{\pi}{3}, 2\right)$ | # | $x_{3}$ | $\sin(x_{2})$ | $\cos\left(x_{2}\right)\dot{x}_{2}$ | $\left(\dfrac{\sqrt{3}}{2}, 1\right)$ | # | $x_{4}$ | $x_{3}^{2}$ | $2x_{3}\dot{x}_{3}$ | $\left(\dfrac{3}{4}, \sqrt{3}\right)$ | # # By evaluating the derivative at each step of the chain rule, we eventually obtain the value of the derivative $f'(x) = \sqrt{3}$ at $x = \dfrac{\pi}{6}$, as second entry of the final tuple in the table. # # While the above illustrates the forward mode of AD (the focus of our package), AD also has a reverse mode. Without using chain rule, it first does a forward pass to store the partial derivatives, before undertaking a reverse pass, which starts from the final function to be differentiated, $y$. After fixing the derivative of the final function, it then computes the derivative of each component function with respect to its parent function recursively (using chain rule) until the derivative of the function with respect to the basic-level argument (e.g. $x_1$) can be calculated. # # In terms of efficiency, the forward mode is more efficient when the number of functions to evaluate is much greater than the number of inputs, whereas the reverse mode, which computes the Jacobian-transpose-vector-product is more efficient when the number of inputs is much greater than the number of functions. 
# ## How to use `AutoDiff` # # At this milestone, the recommended method to access the package is to download or clone the package's Github repo (https://github.com/the-differentiators/cs207-FinalProject.git). The user must ensure that the package's requirements (`numpy`) are installed, or install them manually. # # By the final submission, the package will be available on PyPI and the user will be able to install the package in the standard way using `pip`. We will also provide a `yml` file which will be used by the user to create the appropriate environment and ensure that dependencies like `numpy` have been installed. Then, to use the package, the user will only need to import our package which will implicitly import any other packages used by `AutoDiff`. # # The following steps will walk the user through a demo of how to install and use the `AutoDiff` package: # #### Importing `AutoDiff` and requirements # The following code is first used to change the path directory of this document to the same folder as that of the package # Code to change directory to src folder import os path = os.getcwd().replace('docs','src') os.chdir(path) # Once the package's Github repo has been downloaded, the `AutoDiff` package can be imported with the following code by a Python file, with its working directory configured to the same directory as `AutoDiff.py`: import AutoDiff as AD # For the purposes of this demo, we will import `numpy`, which is a requirement for the *AutoDiff* package, as well as the `Ad_Var` class from the `AutoDiff` package import numpy as np from AutoDiff import Ad_Var # #### Using `AutoDiff` to compute derivative of a scalar function # Below, we have included a basic demo for a scalar function, given a single input. The function used in the demo is $f = sin^2(2x)$, which was used for illustration in the *Background* section earlier. 
# Our objective is to use the `Ad_Var` class to compute the value of the derivative for this function automatically, unlike the manual computational trace drawn out earlier.
#
# First, we create an instance of the `Ad_Var` object, with the value of $x = \dfrac{\pi}{6}$ assigned to the input variable, `val`.

# +
a = np.pi / 6
x = Ad_Var(a)
# -

# The user should note that the `AutoDiff` package assumes that for a single input, the object being initialised will have a derivative value of 1 (stored as a class attribute `self._ders`).

# Next, we create `f`, which represents the full function. The `Ad_Var` object from the previous code can be used with dunder functions and additional functions within the `Ad_Var` class to construct the full function being evaluated.

f = (Ad_Var.sin(2*x))**2

# As the functions are applied to the original `Ad_Var` object `x`, the `_val` and `_ders` attributes of the object are being updated with new values. The object `f`, representing the full function, will have its `_val` and `_ders` attributes containing the actual function and derivative values respectively.
#
# To note: the user does not have the ability to manually set function and derivative values outside of instance initialization. Instead, the `_val` and `_ders` attributes will be made pseudoprivate.
#
# The associated function value and derivative(s) of any `Ad_Var` instance may be retrieved through the `get_val` and `get_ders` functions as shown below:

print(f.get_val(), f.get_ders())

# This returns almost exactly the values that we would have computed manually; the small discrepancy comes from floating-point rounding in Python.
To verify this, we run the following assert statements on the function and derivative values computed using `numpy`, using $f = sin^2(2x)$ and $f' = 4sin(2x)cos(2x)$ # + val = np.pi / 6 assert(f.get_val()== (np.sin(2*val)**2)) assert(f.get_ders()== (4*np.sin(2*val)*np.cos(2*val))) # - # ## Software Organization # ### Directory structure # Our intended directory structure is as follows: # ``` # cs207-FinalProject/ # README.md # requirements.txt # docs/ # milestone1.pdf # milestone2.ipynb # src/ # AutoDiff.py # test/ # test_autodiff.py # # ``` # # ### Modules # # The primary module will be a single `AutoDiff.py` file. Contained within will be the definition for an `Ad_Var` class. Instances of this class, through interaction with other `Ad_Var` objects, will be able to compute the value of a function as well as the value of that function's derivative with respect to any input variable. At present, we envision that this module will be powerful enough to handle forward differentiation of any function comprised of the following elementary functions: # # * Fundamental arithmetic operators (addition, subtraction, multiplication, and division) # * Logarithm (of any base) # * Negation # * Exponentiation ($e^x$ for an `Ad_Var` instance $x$) # * Power and root functions ($x^n$ for some real $n$) # * Trigonometric functions ($\sin(x)$, $\cos(x)$, $\tan(x)$) # * Inverse trigonometric functions ($\arcsin(x)$, $\arccos(x)$, $\arctan(x)$) # # Depending on our eventual choice for the "additional" feature of this project, or future design decisions, there # may be additional modules added in the future that supplement or subdivide the functionality of `AutoDiff.py`. # # Each instance of the `Ad_Var` class in the `AutoDiff` package represents the definition of a set of variables at a particular evaluation point. 
Through manipulations of these instances (either through fundamental arithmetic operations or built-in methods representing additional elementary functions described earlier), a user has the capability of representing any continuous differentiable function, be it scalar or vector. This was shown earlier via a code demo. # # # ### Testing and Coverage # In the `test` folder, there will be a separate Python module `test_autodiff.py`, which will be the test-suite for `AutoDiff.py`. # # The test-suite will contain tests for the methods in the `Ad_Var` class, to ensure that the elementary functions return the desired output. Tests are run using pytest. The tests are linked to Travis CI and CodeCov, which will manage continuous integration and code coverage respectively. # # ### Installation and distribution of package # At this milestone, the recommended method to access the package is to download or clone the package's Github repo (https://github.com/the-differentiators/cs207-FinalProject.git). The user must ensure that the package's requirements (`numpy`) are installed, or install them manually. # # By the final submission, we intend to distribute the package via PyPI. There will be no additional packaging framework included; we believe the scope of this project can be contained within a relatively simple directory structure with few functional python files and should not require additional overhead for users to install and use. # # The package will be available on PyPI and the user will be able to install the package in the standard way using `pip`. We will also provide a `yml` file which will be used by the user to create the appropriate environment and ensure that dependencies like `numpy` have been installed. Then, to use the package, the user will only need to import our package which will implicitly import any other packages used by `AutoDiff`. 
# ## Implementation Details # # ### Core Data Structures # * `numpy` arrays: 1-D `numpy` arrays will be used to keep the gradient vectors as the entire trace is evaluated. `numpy` # provides vectorized operations which will make the overloading of elementary functions much more efficient for # multivariate functions. If a vector function is provided, 2-D `numpy` arrays will be used to hold the Jacobian matrix. # # ### Class Implementation # * The `Ad_Var` class will represent the variables that are used in the Automatic Differentiation process. In the case of a single input, the instance should be initialized with, `val`, a scalar value of that variable to be evaluated on when calculating both the function and derivative values (as shown in the demo above) # # * In the case of multiple inputs, each input will be initialized as an `Ad_Var` object, with inputs `val`, a scalar value of that variable and `ders`, a `numpy` array representing the derivative of the input with regards to the other variables. An example is shown below: x1 = Ad_Var(1, np.array([1, 0, 0])) x2 = Ad_Var(2, np.array([0, 1, 0])) x3 = Ad_Var(3, np.array([0, 0, 1])) # * Dunder methods such as "add" and "mul", and other elementary functions will be implemented under this class. More information on this is covered below in the *Class Methods* section. # # * As part of the class methods, we have included two static methods, `get_jacobian` and `get_values`, which respectively compute the Jacobian matrix and an array of function values for an array of `Ad_Var` objects. # # * In our implementation, we will also use the try-except method to catch unexpected input types: for example, if the user initializes the variable value of the `Ad_Var` instance with a value of type string, which is not a valid input type. 
# ### Core Attributes # * `val`: float value, indicating the function value of the `Ad_Var` object evaluated at the given point # * `ders` (for single input): float value, indicating the derivative value of `Ad_Var` object evaluated at the given point # * `ders` (for multiple inputs): 1-D array of floats, representing the value of the derivatives of the multiple inputs evaluated at the given point # # * `val` and `ders` attributes will be made pseudoprivate to prevent users from manually setting function and derivative values outside of instance initialization # # ### External Dependencies # * `numpy` for implementation of the elementary functions (e.g. sin, sqrt, log and exp), by overloading `numpy` implementations for these functions # * `pytest` for testing # * TravisCI and CodeCov used to manage continuous integration and code coverage # # ### Class Methods # # 1. `__init__(self, val, ders=1)`: # * Sets self._val to the argument `val` # * Sets self._ders to the argument `ders` # # # 2. `__eq__(self, other)`: # * Returns True if self.val == other.val and self.ders == other.ders, returns False otherwise # # # 3. `_repr__(self)`: # * Returns a string representing the value of `self._val` (Value) and the value of `self._ders` (Gradient) # # # 4. `get_val(self)`: # * Returns the value of the attribute `self._val` # # # 5. `get_ders(self)`: # * Returns the value of the attribute `self._ders` # # # 6. `__add__(self, other)` and `__radd__(self, other)`: # * Other can be a float, int or AutoDiff object # * Returns an `Ad_Var` object when calculating self + other or other + self # # # 7. `__sub__(self, other)` and `__rsub__(self, other)`: # * Other can be a float, int or AutoDiff object # * Returns an `Ad_Var` object when calculating self - other or other - self # # # 8. `__mul__(self, other)` and `__rmul__(self, other)`: # * Other can be a float, int or AutoDiff object # * Returns an `Ad_Var` object when calculating self * other or other * self # # # 9. 
`__truediv__(self, other)` and `__rtruediv__(self, other)`: # * Other can be a float, int or AutoDiff object # * Returns an `Ad_Var` object when calculating self / other or other / self # # # 10. `__pow__(self, other)` and `__rpow__(self, other)`: # * `other` can be a float, int or `Ad_Var` object # * `__rpow__` will require `other` to be a numeric type, otherwise, it will raise a TypeError # * Returns an `Ad_Var` object when calculating self ** other # # # 11. `sqrt(self)`: # * Returns an `Ad_Var` object by calling the __pow__ method using self**0.5 # # # 12. `exp(self)`: # * Returns an `Ad_Var` object with `self._val = np.exp(self._val)` and `self._deres = np.exp(self._val) * self._ders` # # # 13. `log(self, logbase=np.e)`: # * Optional argument for `logbase` (can be a float or int). By default, `logbase` is set to the exponential. # * Returns an `Ad_Var` object with `self._val = np.log(self._val) / np.log(logbase)` and `self._ders = self._ders / (self._val * np.log(logbase))` # # # 14. `sin(self)` and `cos(self)` and `tan(self)`: # * Returns an `Ad_Var` object with `self._val` and `self._ders` updated accordingly based on the given trigonometric function # # # 15. `arcsin(self)` and `arccos(self)` and `arctan(self)`: # * Returns an `Ad_Var` object with `self._val` and `self._ders` updated accordingly based on the given inverse trigonometric function # # # 16. `get_jacobian(functions_array, functions_dim, vars_dim)`: # * Static method that returns the Jacobian matrix for a given array of `Ad_Var` objects # # # 17. `get_values(functions_array)`: # * Static method that returns the an array of function values for a given array of `Ad_Var` objects # ## Future Features # # Our next steps will be to implement the reverse mode of Automatic Differentiation. The main challenge for this is to translate the conceptual framework of reverse mode into functioning code that builds on existing code. 
This might require creating a new class specifically for the reverse mode that replicates similar functions already in the existing `Ad_Var`. # # Building on our implementation of reverse mode of Auto Differentiation, a future feature that we plan to implement would be a feature that automatically chooses between the forward mode or reverse mode of Automatic Differentiation, based on what is optimal based on the number of parameters and functions. This will be presented to the user of the package to facilitate their decision-making. # # In terms of the forward mode of Auto Differentiation, we will work on refining the `get_jacobian` method that would return the Jacobian matrix for an array of `Ad_Var` objects. This will allow the user to access the Jacobian via the `Ad_Var` class. We will also include doc-strings for the code, to ensure that the code is accessible to the user. # #### References # # * [A Hitchhiker’s Guide to Automatic Differentiation](https://link.springer.com/article/10.1007/s11075-015-0067-6) # * Harvard CS207 2019 course materials
docs/milestone2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # "Hello world" in Python!

# + [markdown] slideshow={"slide_type": "subslide"}
# When programmers are learning a new language, we tend to write a one-line program that prints some version of the message "Hello world!". This is a simple program that shows whether your computer is properly set up to run Python programs.

# + slideshow={"slide_type": "fragment"}
print('Hello Python world!')

# + [markdown] slideshow={"slide_type": "fragment"}
# If it works, congratulations! You just ran your first Python program.
01 Hello World.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="Tce3stUlHN0L" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="tuOe1ymfHZPu" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="qFdPvlXBOdUN" # # Estimators # + [markdown] colab_type="text" id="MfBg1C5NB3X0" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/estimators"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/estimators/index.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/estimators/index.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/estimators/index.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="oEinLJt2Uowq" # This 
document introduces `tf.estimator`—a high-level TensorFlow # API that greatly simplifies machine learning programming. Estimators encapsulate # the following actions: # # * training # * evaluation # * prediction # * export for serving # # You may either use the pre-made Estimators we provide or write your # own custom Estimators. All Estimators—whether pre-made or custom—are # classes based on the `tf.estimator.Estimator` class. # # For a quick example try [Estimator tutorials](../../tutorials/estimators/linear.ipynb). For an overview of the API design, see the [white paper](https://arxiv.org/abs/1708.02637). # # Note: TensorFlow also includes a deprecated `Estimator` class at # `tf.contrib.learn.Estimator`, which you should not use. # + [markdown] colab_type="text" id="yQ8fQYt_VD5E" # ## Estimator advantages # # Estimators provide the following benefits: # # * You can run Estimator-based models on a local host or on a distributed multi-server environment without changing your model. Furthermore, you can run Estimator-based models on CPUs, GPUs, or TPUs without recoding your model. # * Estimators simplify sharing implementations between model developers. # * You can develop a state of the art model with high-level intuitive code. In short, it is generally much easier to create models with Estimators than with the low-level TensorFlow APIs. # * Estimators are themselves built on `tf.keras.layers`, which simplifies customization. # * Estimators build the graph for you. # * Estimators provide a safe distributed training loop that controls how and when to: # * build the graph # * initialize variables # * load data # * handle exceptions # * create checkpoint files and recover from failures # * save summaries for TensorBoard # # When writing an application with Estimators, you must separate the data input # pipeline from the model. This separation simplifies experiments with # different data sets. 
# + [markdown] colab_type="text" id="sXNBeY-oVxGQ" # ## Pre-made Estimators # # Pre-made Estimators enable you to work at a much higher conceptual level than the base TensorFlow APIs. You no longer have to worry about creating the computational graph or sessions since Estimators handle all the "plumbing" for you. Furthermore, pre-made Estimators let you experiment with different model architectures by making only minimal code changes. `tf.estimator.DNNClassifier`, for example, is a pre-made Estimator class that trains classification models based on dense, feed-forward neural networks. # # ### Structure of a pre-made Estimators program # # A TensorFlow program relying on a pre-made Estimator typically consists of the following four steps: # # #### 1. Write one or more dataset importing functions. # # For example, you might create one function to import the training set and another function to import the test set. Each dataset importing function must return two objects: # # * a dictionary in which the keys are feature names and the values are Tensors (or SparseTensors) containing the corresponding feature data # * a Tensor containing one or more labels # # For example, the following code illustrates the basic skeleton for an input function: # # ``` # def input_fn(dataset): # ... # manipulate dataset, extracting the feature dict and the label # return feature_dict, label # ``` # # See [data guide](../../guide/data.md) for details. # # #### 2. Define the feature columns. # # Each `tf.feature_column` identifies a feature name, its type, and any input pre-processing. For example, the following snippet creates three feature columns that hold integer or floating-point data. The first two feature columns simply identify the feature's name and type. The third feature column also specifies a lambda the program will invoke to scale the raw data: # # ``` # # Define three numeric feature columns. 
# population = tf.feature_column.numeric_column('population') # crime_rate = tf.feature_column.numeric_column('crime_rate') # median_education = tf.feature_column.numeric_column( # 'median_education', # normalizer_fn=lambda x: x - global_education_mean) # ``` # For further information, it is recommended to check this [tutorial](https://www.tensorflow.org/tutorials/keras/feature_columns). # # #### 3. Instantiate the relevant pre-made Estimator. # # For example, here's a sample instantiation of a pre-made Estimator named `LinearClassifier`: # # ``` # # Instantiate an estimator, passing the feature columns. # estimator = tf.estimator.LinearClassifier( # feature_columns=[population, crime_rate, median_education]) # ``` # For further information, it is recommended to check this [tutorial](https://www.tensorflow.org/tutorials/estimators/linear). # # #### 4. Call a training, evaluation, or inference method. # # For example, all Estimators provide a `train` method, which trains a model. # # ``` # # `input_fn` is the function created in Step 1 # estimator.train(input_fn=my_training_set, steps=2000) # ``` # You can see an example of this below. # # ### Benefits of pre-made Estimators # # Pre-made Estimators encode best practices, providing the following benefits: # # * Best practices for determining where different parts of the computational graph should run, implementing strategies on a single machine or on a # cluster. # * Best practices for event (summary) writing and universally useful # summaries. # # If you don't use pre-made Estimators, you must implement the preceding features yourself. # + [markdown] colab_type="text" id="oIaPjYgnZdn6" # ## Custom Estimators # # The heart of every Estimator—whether pre-made or custom—is its *model function*, which is a method that builds graphs for training, evaluation, and prediction. When you are using a pre-made Estimator, someone else has already implemented the model function. 
# When relying on a custom Estimator, you must write the model function yourself.
#
# ## Recommended workflow
#
# 1. Assuming a suitable pre-made Estimator exists, use it to build your first model and use its results to establish a baseline.
# 2. Build and test your overall pipeline, including the integrity and reliability of your data with this pre-made Estimator.
# 3. If suitable alternative pre-made Estimators are available, run experiments to determine which pre-made Estimator produces the best results.
# 4. Possibly, further improve your model by building your own custom Estimator.

# + colab={} colab_type="code" id="l0QKHuEJ4Kc_"
from __future__ import absolute_import, division, print_function, unicode_literals

# + colab={} colab_type="code" id="kRr7DGZxFApM"
try:
    # # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    # `pass` keeps this block valid Python in the .py representation,
    # where the notebook magic above is commented out.
    pass
except Exception:
    pass

import tensorflow as tf

# + colab={} colab_type="code" id="IqR2PQG4ZaZ0"
import tensorflow_datasets as tfds
tfds.disable_progress_bar()

# + [markdown] colab_type="text" id="P7aPNnXUbN4j"
# ## Create an Estimator from a Keras model
#
# You can convert existing Keras models to Estimators with `tf.keras.estimator.model_to_estimator`. Doing so enables your Keras
# model to access Estimator's strengths, such as distributed training.
#
# Instantiate a Keras MobileNet V2 model and compile the model with the optimizer, loss, and metrics to train with:

# + colab={} colab_type="code" id="XE6NMcuGeDOP"
keras_mobilenet_v2 = tf.keras.applications.MobileNetV2(
    input_shape=(160, 160, 3), include_top=False)

estimator_model = tf.keras.Sequential([
    keras_mobilenet_v2,
    tf.keras.layers.Flatten(),
    # Softmax over a single unit is constant (always 1.0), which makes
    # binary_crossentropy untrainable; a single-unit sigmoid is the correct
    # head for binary classification.
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Compile the model.
# `Model.compile` takes `metrics` (plural, a list); `metric=` is not a
# valid keyword and raises a TypeError.
estimator_model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

# + [markdown] colab_type="text" id="A3hcxzcEfYfX"
# Create an `Estimator` from the compiled Keras model.
# The initial model state of the Keras model is preserved in the created `Estimator`:

# + colab={} colab_type="code" id="UCSSifirfyHk"
est_mobilenet_v2 = tf.keras.estimator.model_to_estimator(keras_model=estimator_model)

# + [markdown] colab_type="text" id="8jRNRVb_fzGT"
# Treat the derived `Estimator` as you would with any other `Estimator`.

# + colab={} colab_type="code" id="Rv9xJk51e1fB"
IMG_SIZE = 160  # All images will be resized to 160x160


def preprocess(image, label):
    """Cast to float32, rescale pixels to [-1, 1], and resize; pass the label through."""
    image = tf.cast(image, tf.float32)
    image = (image / 127.5) - 1
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    return image, label


# + colab={} colab_type="code" id="Fw8OjwujVBkc"
def train_input_fn(batch_size):
    """Input function: the cats_vs_dogs training split, preprocessed, shuffled, and batched."""
    data = tfds.load('cats_vs_dogs', as_supervised=True)
    train_data = data['train']
    train_data = train_data.map(preprocess).shuffle(500).batch(batch_size)
    return train_data


# + [markdown] colab_type="text" id="JMb0cuy0gbTi"
# To train, call Estimator's train function:

# + colab={} colab_type="code" id="4JsvMp8Jge80"
est_mobilenet_v2.train(input_fn=lambda: train_input_fn(32), steps=500)

# + [markdown] colab_type="text" id="jvr_rAzngY9v"
# Similarly, to evaluate, call the Estimator's evaluate function:

# + colab={} colab_type="code" id="kVNPqysQgYR2"
est_mobilenet_v2.evaluate(input_fn=lambda: train_input_fn(32), steps=10)

# + [markdown] colab_type="text" id="5HeTOvCYbjZb"
# For more details, please refer to the documentation for `tf.keras.estimator.model_to_estimator`.
site/en/guide/estimators/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import os import glob import pickle import copy import pprint from peewee import * from playhouse.apsw_ext import APSWDatabase from playhouse.migrate import SqliteMigrator, migrate paths = glob.glob("/Users/Venky/Work/UIowa/DEEPN/dictionaries/*.p") # # Database Definition database = APSWDatabase(None, pragmas={'journal_mode': 'off', 'cache_size': -500*1000, 'ignore_check_constraints': 0, 'synchronous': 0, 'foreign_keys': 1, 'temp_store': 'memory', 'locking_mode': 'exclusive'}) def create_class(class_name, parent=Model): NewClass = type(class_name, (parent,), { "start": IntegerField(), "stop": IntegerField(), "chromosome_name": CharField(), "gene_name": CharField(), "exontype": BooleanField(), }) NewClass._meta.database = database NewClass._meta.table_name = class_name return NewClass database.init("/Users/Venky/Work/UIowa/DEEPN/dictionaries/exons.db") for path in paths: table = create_class(os.path.basename(os.path.basename(path).replace(".p", ""))) print "Creating table: ", table._meta.table_name database.create_tables([table]) exon_list = pickle.load(open(path, "rb")) for ch in exon_list.keys(): print "Chromosome: ", ch for exon in exon_list[ch].keys(): if not table.select().where(table.start==exon[0], table.stop==exon[1], table.chromosome_name==exon[2], table.gene_name==exon[4], table.exontype==True if exon[-1] == 'Y' else False): table.insert(start=exon[0], stop=exon[1], chromosome_name=exon[2], gene_name=exon[4], exontype=True if exon[-1] == 'Y' else False).execute() database.close()
notebooks/convert-dot-p-files.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import sympy as sp
import scipy as sc
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
import scipy.optimize as op
from mpl_toolkits.mplot3d import axes3d
from scipy.io import loadmat
import librosa
import plotly.graph_objects as go
from IPython.display import Audio
import plotly.express
from audio2numpy import open_audio
from librosa import display
# %matplotlib notebook

# $PROBLEM$ $1$

# $Part$ $A$

# Plot the sawtooth function for x between 0 and 2.
resolution = 0.0001
x = np.arange(0, 2, resolution)
square = np.zeros_like(x)
squarefft = np.zeros_like(x)
# Ramp on the first half of the interval, zero on the second half.
square[:int(x.size/2)] = x[:10000]
square[int(x.size/2):] = 0
print("The Original Sawtooh Function")
plt.figure()
plt.plot(x, square)
plt.show()

# $Part$ $B$

# Approximate the sawtooth with a truncated Fourier series,
# for k = 1, 3, 5, 10, 25, 100, all plotted on one graph in different colors.
resolution = 0.0001
x = np.arange(0, 2, resolution)
square = np.zeros_like(x)
squarefft = np.zeros_like(x)
square[:int(x.size/2)] = x[:10000]
square[int(x.size/2):] = 0
plt.figure()
plt.plot(x, square, label='Original Function')
a0 = np.trapz(square, x)/np.pi/2      # DC term of the series
squarefft = squarefft + a0
n = 100
for i in range(n):
    i = i + 1                          # harmonic index k = 1..100
    # Fourier coefficients via numerical integration.
    a = np.trapz(square*np.sin(i*x), x)/np.pi
    b = np.trapz(square*np.cos(i*x), x)/np.pi
    squarefft = squarefft + a*np.sin(i*x) + b*np.cos(i*x)
    # Snapshot the partial sum at selected truncation orders.
    if i == 1:
        plt.plot(x, squarefft, label='For K=1')
    if i == 3:
        plt.plot(x, squarefft, label='For K=3')
    if i == 5:
        plt.plot(x, squarefft, label='For K=5')
    if i == 10:
        plt.plot(x, squarefft, label='For K=10')
    if i == 25:
        plt.plot(x, squarefft, label='For K=25')
    if i == 100:
        plt.plot(x, squarefft, label='For K=100')
# K=100 (light pink) approximates the function best of all the curves.
plt.legend()
plt.show()

# $PROBLEM$ $2$

# $Part$ $A$

from scipy.io import loadmat
data = loadmat('dataF.mat')
s = data['s'].reshape((-1, 1))
N = len(s)
# FIX: Fs was used here before ever being defined (NameError on a fresh run).
# The sampling frequency is 2000 Hz -- consistent with dt = 1/2000 used in
# Part G and with Fs = 2000 assigned in Problem 3.
Fs = 2000
dt = 1/Fs
T = N*dt
print("Sample Freq =", Fs, ", N =", len(s), ", dt=", dt)
t = np.linspace(0, T, N)
# From the graph the period T is clearly 0.1 seconds; frequency = 1/T.
Freq = 1/0.1
print("Frequency is", Freq, "Hz")
plt.figure()
plt.ylabel("Amplitude")
plt.xlabel("Time [s]")
plt.plot(t, t*0, t, s)
plt.show()

# $Part$ $B$

# The first 7 frequencies in the Fourier series: f_k = k*Fs/N.
for k in range(7):
    print("f", k, "=", (Fs*k)/N, "Hz", sep='')

# $Part$ $C$

# +
# Calculating coefficients by formula.
# NOTE(review): this evaluates exp(-i*2*pi*f_m*t) only at the single sample
# t[m] instead of over the whole time vector; a textbook DFT coefficient is
# c_m = sum_n s[n]*exp(-i*2*pi*f_m*t[n]).  Kept as in the original solution.
i = 1j
t = np.linspace(0, dt, N)
fre = np.array([0, 2, 4, 6, 8, 10, 12]).reshape((-1, 1))
exp = np.zeros(7, dtype=np.complex_)
c = np.zeros(7, dtype=np.complex_)
for m in range(7):
    exp[m] = np.exp(-i*2*np.pi*fre[m]*t[m])
for m in range(7):
    c[m] = np.sum(s[:, 0]*exp[m])
for m in range(7):
    print("c", m, "=", c[m], sep='')
# -

# $Part$ $D$

# Power per coefficient: P_k = |c_k|**2 / N.
# FIX: the original indexed capital C, which is only defined later (Part F)
# and would raise NameError; the coefficients computed above are lowercase c.
for k in range(7):
    print("P", k, "=", np.absolute(c[k])**2/N, sep='')

# $Part$ $E$

# The frequency with max energy is f5 with P5 = .00025; we can also confirm
# this in the Part G graph.
print("The frequency with max enegy is f5=10Hz")

# $Part$ $F$

# Same coefficients via the FFT, normalised by N.
C = (np.fft.fft(s[:, 0]))/N
for k in range(7):
    print("C", k, "=", C[k], sep='')

# $Part$ $G$

dt = 1/2000
n = 1000
C = np.fft.fft(s[:, 0])/n
freq = np.linspace(0, 1/dt, n)
freq = np.arange(0, 1000, 2).reshape((-1, 1))   # overwrites the line above
# The peak sits at the right value, about 10 Hz.
print("The peak which has most presence is about 10Hz from graph")
plt.figure()
plt.ylim(-0.00001, 0.0006)   # to take a closer look at the graph
plt.xlim(0, 200)             # to take a closer look at the graph
plt.ylabel("Amplitude")
plt.xlabel("Frequency [Hz]")
# NOTE(review): C was already divided by n above, so the extra *1/n here
# normalises twice -- kept to preserve the plotted scale of the original.
plt.plot(freq[:n//2], np.abs(C)[:n//2]*1/n)
plt.show()

# $PROBLEM$ $3$

# $Part$ $A$

sr = data['sr'].reshape((-1, 1))
s = data['s'].reshape((-1, 1))
# print("1000 samples were taken with Fs of 2000, thus time was 0.5 sec")
t = np.arange(0, 0.5, 0.0005).reshape((-1, 1))
Fs = 2000
plt.figure()
plt.plot(t, t*0, t, sr)
plt.show()

# $Part$ $B$

f = np.fft.fft(sr[:, 0])
N = 1000
plt.figure()
plt.ylabel("Amplitude")
plt.xlabel("Frequency [Hz]")
# `freq` here is reused from Problem 2 Part G (0..998 Hz in 2 Hz steps).
plt.plot(freq[:N//2], np.abs(f)[:N//2]*1/N)
plt.show()

# $Part$ $C$

print("From graph we can estimate the 4 peaks\n ")
# F1=25Hz
# F2=65Hz
# F3=120Hz
# F4=200Hz
print("F1=25Hz");print("F2=65Hz");print("F3=120Hz");print("F4=200Hz")

# $PROBLEM$ $4$

# $Part$ $A$

from scipy.io import wavfile
samplerate, data = wavfile.read('wilhelm.wav')
s = data[:, 0]            # first audio channel
# Fs=44100 #given sampling frequency
print("Samplerate =", samplerate, ", N =", len(data))
N = len(data)
T = len(data)/samplerate
t = np.linspace(0, T, N)
plt.figure()
plt.ylabel("Data")
plt.xlabel("Time [s]")
plt.plot(t, s)

# $Part$ $B$

freq = np.linspace(0, samplerate, N)
f = np.fft.fft(s)
plt.figure()
plt.ylabel("Amplitude")
plt.xlabel("Frequency [Hz]")
print("From graph the prominent frewuencies are about F1=1400Hz and F2=3000Hz")
plt.plot(freq[:N//2], np.abs(f)[:N//2]*1/N)
plt.show()
assignments/assignment05/assignment05_solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Experiment Collection #04
#
# This notebook contains experiments regarding the impact of different parts of the observation space. The problem is the stochastic version.
#
# NOTE(review): the heading says "Collection #04" but the experiment below is
# named "experiment_05_hyperparameter_tuning" -- confirm which is intended.

# ## 1. Basic Setup

# Jupyter setup
# %load_ext autoreload
# %autoreload 2
# %config IPCompleter.greedy=True

# +
import ray
import ray.rllib
import ray.tune

import solara.envs.creator

## Initialising ray (starts background process for distributed computing)
ray.shutdown()
ray.init(logging_level="WARNING", object_store_memory= 25 * 10**9)

# Adding environment creator function to ray
ray.tune.registry.register_env("battery_control", solara.envs.creator.create_env)

# Output format of figures
OUT_FORMAT = ".svg"
# -

# ## 2. Experiment Definition

# +
from solara.constants import PROJECT_PATH
import solara.utils.rllib

EXPERIMENT_NAME = "experiment_05_hyperparameter_tuning"

# RL environment configuration
ENV_CONFIG = {
    'general': {
        'type': 'battery_control.BatteryControlEnv',
        'infeasible_control_penalty': True,
        'grid_charging': True,
        'obs_keys': ["load", "pv_gen", "battery_cont", "time_step"],
        'logging_level': "RAY",  # if using RLlib, set to 'RAY'
    },
    'components': {
        'battery': {
            'type': 'LithiumIonBattery',
            'size': 10,
            'chemistry': 'NMC',
            'time_step_len': 1,
        },
        'solar': {
            'type': 'DataPV',
            'data_path': PROJECT_PATH + "/data/ausgrid/processed/house2_solar_gen.txt",
            'fixed_sample_num': None,
        },
        'load': {
            'type': 'DataLoad',
            'data_path': PROJECT_PATH + "/data/ausgrid/processed/house2_combined_load.txt",
            'fixed_sample_num': None,
        },
        'grid': {
            'type': 'PeakGrid',
            'peak_threshold': 1.0,
        },
    },
}

# RL agent configuration
# The ray.tune.* entries below are search-space samplers, resolved per trial.
AGENT_CONFIG = {
    "env": "battery_control",
    "env_config": ENV_CONFIG,
    "gamma": 1,
    "num_sgd_iter": ray.tune.lograndint(1, 64),
    "lr": ray.tune.qloguniform(1e-6, 1e-1, 1e-6),
    "train_batch_size": ray.tune.lograndint(8, 4000),
    "sgd_minibatch_size": ray.tune.lograndint(64, 256),
    #"entropy_coeff": ray.tune.quniform(0, 0.01, 1e-5),
    #"clip_param": ray.tune.quniform(0.1, 1, 0.05),
    "model": {
        "fcnet_hiddens": [256, 256, 256, 256], #ray.tune.choice([[256, 256, 256, 256],[256, 256, 256],[64, 64]]),
        "fcnet_activation": "relu", #ray.tune.choice(["tanh", "relu"]),
        "post_fcnet_activation": "tanh",
    },

    # Utilities settings
    "framework": "torch",
    "log_level": "WARNING",
    #"num_workers": 9,
    #"num_gpus": 1,
    "callbacks": solara.utils.rllib.InfoCallback,
    # NOTE(review): `ray.ray.tune` below looks like a typo for `ray.tune` --
    # confirm it actually resolves in the installed ray version.
    "seed" : ray.ray.tune.randint(0, 10000000),
    #"rerun_num":ray.ray.tune.grid_search([1,2])
}

# Full experiment configuration including RL algorithm type
EXPERIMENT_CONFIG = {
    "run_or_experiment": "PPO",
    "config": AGENT_CONFIG,
    "stop": {"time_total_s": 12*100}, #{"training_iteration": 100},
    "name": EXPERIMENT_NAME,
    "local_dir": "./tmp/tune/",
    "log_to_file": True,
    "checkpoint_freq": 1000,
    "checkpoint_at_end": True,
}

# Other settings
PLOT_DIR = PROJECT_PATH + "/figures/experiments/"
# -

# +
# Parallelisation Setup
if False:
    num_workers = 4
    gpu_count = 1
    reserved_capacity = 0.01 # Driver GPU
    num_gpus_per_worker = (gpu_count - reserved_capacity) / num_workers
    AGENT_CONFIG["num_workers"] = num_workers
    # NOTE(review): `num_gpus` is never defined (only `num_gpus_per_worker`
    # is computed above); this line would raise NameError if the `if False:`
    # guard were ever enabled.
    AGENT_CONFIG["num_gpus"] = num_gpus
    AGENT_CONFIG["num_envs_per_worker"]= 8

#AGENT_CONFIG["num_gpus"] = 1
#AGENT_CONFIG["num_envs_per_worker"]= 8
AGENT_CONFIG["num_workers"] = 10
AGENT_CONFIG["num_gpus"] = 1
#AGENT_CONFIG["remote_worker_envs"]= True
# -

# ## 3. Running Experiment

# +
# Setting visualisation in notebook
reporter = ray.tune.JupyterNotebookReporter(overwrite=True)
reporter.add_metric_column("custom_metrics/cost_mean")
reporter.add_metric_column("custom_metrics/power_diff_mean")

# Running experiment
analysis = ray.tune.run(
    progress_reporter=reporter,
    **EXPERIMENT_CONFIG,
    num_samples=150,
    #resume=True
)
# -

# ## 4. Visualisation

# +
import os

import solara.plot.pyplot
import matplotlib.pyplot as plt

# Reload the most recent experiment state file from the tune output directory.
exp_path = EXPERIMENT_CONFIG["local_dir"] + EXPERIMENT_CONFIG["name"] + "/"
#exp_path = "./tmp/tune/PPO/"
state_files = [filename for filename in os.listdir(exp_path) if "experiment_state" in filename]
last_state_file = sorted(state_files, reverse=True)[0]
analysis = ray.tune.ExperimentAnalysis(experiment_checkpoint_path=exp_path + last_state_file)
trials = analysis.fetch_trial_dataframes()
trials = {key: trials[key] for key in sorted(trials.keys())}  # Sort trials
# -

# +
# Creating helper function for plotting
import numpy as np

def plot_trials(trials,
                necessary_cond=None,
                other_conditions=None,
                selected_labels=None,
                experiment_name="default_experiment",
                plot_name = "plot_00_default",
                plot_dir = "./figures",
                trace_start=0,
                trace_end=200,
                metric="custom_metrics/cost_mean",
                x_label="Average cost per episode (\$)",
                optimal_value=None):
    """Plot progress over iterations for experiments.

    Trials are grouped by substring matches of `other_conditions`
    ([(label, substring), ...]) against the trial name; traces within a group
    are averaged.  `necessary_cond` filters trials that must match all given
    substrings; `optimal_value` draws a grey horizontal baseline.
    """

    # Default settings
    solara.plot.pyplot.default_setup()

    # Other settings
    x_values = np.arange(trace_start+1,trace_end+1)
    ticks_gap = 25
    x_ticks = [trace_start+1] + list(np.arange((trace_start//ticks_gap + 1)*ticks_gap,trace_end+1, ticks_gap)) + [trace_end]

    other_conditions = list(other_conditions)

    traces = {}

    for trial_name, trial_data in trials.items():
        if necessary_cond is not None:
            contains_nec = all([necessary[1] in trial_name for necessary in necessary_cond])
            #print([necessary[1] in trial_name for necessary in necessary_cond])
        if necessary_cond is None or contains_nec:
            # Build the group label from all matching conditions.
            label = ""
            for i, (cond_label, condition) in enumerate(other_conditions):
                if condition in trial_name:
                    label += cond_label
                else:
                    pass
                    #label += "no " + cond_label
                if i < len(other_conditions) - 1:
                    pass
                    #label += ", "
            label = label.capitalize()
            if selected_labels is None or label in selected_labels:
                trace = trial_data[metric][trace_start:trace_end]
                #print(trial_data.keys())
                if label in traces:
                    traces[label].append(trace)
                else:
                    traces[label] = [trace]
                #plt.plot(x_values,trace, label=label)

    # One averaged curve per condition group.
    for cond in other_conditions:
        label = cond[0]
        trace_list = traces[label]
        plt_trace = np.mean(trace_list, axis=0)
        plt.plot(x_values,plt_trace, label=label)

    if optimal_value is not None:
        plt.plot(x_values,np.ones(len(x_values))*optimal_value, label="Optimal control (avg)", color="lightgrey")

    #plt.semilogy()
    plt.legend()
    plt.xlabel("Training iteration")
    plt.ylabel(x_label)
    plt.xticks(x_ticks)
    plt.savefig(fname=plot_dir + experiment_name + "_" + plot_name + OUT_FORMAT)
# -

# +
# NOTE(review): OBS_KEYS_OPTIONS is not defined anywhere in this notebook
# (presumably carried over from experiment collection #04); this cell raises
# NameError as written.
STR_OBS_KEYS_OPTIONS = [str(obs_keys) for obs_keys in OBS_KEYS_OPTIONS]

LABELS = [
    "Quad. obs. space",
    "Add cum. load",
    "Add cum solar gen.",
    "Add both cum. values",
]
CONDS = list(zip(LABELS, STR_OBS_KEYS_OPTIONS))

STR_OBS_KEYS_OPTIONS
# -

# +
plot_trials(trials,
            necessary_cond=None,
            other_conditions=CONDS,
            experiment_name=EXPERIMENT_NAME,
            plot_dir=PLOT_DIR,
            plot_name="plot_01_obs_ablation_study",
            trace_start= 0,
            trace_end=200,
            optimal_value=3.3682)
# -

plot_trials(trials,
            necessary_cond=None,
            other_conditions=CONDS,
            experiment_name=EXPERIMENT_NAME,
            plot_dir=PLOT_DIR,
            plot_name="plot_02_obs_ablation_closeup",
            trace_start= 149,
            trace_end=200)

plot_trials(trials,
            necessary_cond=[["","['load', 'pv_gen', 'battery_cont', 'time_step', 'cum_load', 'cum_pv_gen']"]],
            other_conditions=[["2286394","seed=2286394"], ["7615519","seed=7615519"]],
            experiment_name=EXPERIMENT_NAME,
            plot_dir=PLOT_DIR,
            plot_name="plot_02_obs_ablation_closeup",
            trace_start= 149,
            trace_end=200)

plot_trials(trials,
            necessary_cond=None,
            other_conditions=CONDS,
            experiment_name=EXPERIMENT_NAME,
            plot_dir=PLOT_DIR,
            plot_name="plot_03_penalty",
            trace_start= 149,
            trace_end=200,
            metric="custom_metrics/power_diff_mean",
            x_label="Average penalty per episode")

plot_trials(trials,
            necessary_cond=None,
            other_conditions=CONDS,
            experiment_name=EXPERIMENT_NAME,
            plot_dir=PLOT_DIR,
            plot_name="plot_04_battery_cont",
            trace_start= 149,
            trace_end=200,
            metric="custom_metrics/battery_cont_mean",
            x_label="Avg. cum. battery content per ep.")

# NOTE(review): leftover cell -- a bare list literal of the available metric
# column names (evaluates to itself for display only).
['episode_reward_max', 'episode_reward_min', 'episode_reward_mean',
 'episode_len_mean', 'episodes_this_iter', 'num_healthy_workers',
 'timesteps_total', 'agent_timesteps_total', 'done', 'episodes_total',
 'training_iteration', 'experiment_id', 'date', 'timestamp',
 'time_this_iter_s', 'time_total_s', 'pid', 'hostname', 'node_ip',
 'time_since_restore', 'timesteps_since_restore', 'iterations_since_restore',
 'trial_id', 'custom_metrics/cost_mean', 'custom_metrics/cost_min',
 'custom_metrics/cost_max', 'custom_metrics/power_diff_mean',
 'custom_metrics/power_diff_min', 'custom_metrics/power_diff_max',
 'custom_metrics/battery_cont_mean', 'custom_metrics/battery_cont_min',
 'custom_metrics/battery_cont_max', 'hist_stats/episode_reward',
 'hist_stats/episode_lengths']

# +
# Helper functions

def get_episode_data_from_checkpoint(exp_path: str, iteration_num: int):
    """Get episode data from loading policy from certain iteration of experiment."""

    trial_agent_config = analysis.get_all_configs()[exp_path]

    # Remove some unnecessary configs that may stop re-loading
    trial_agent_config.pop("callbacks")
    trial_agent_config.pop("num_gpus")

    agent = ray.rllib.agents.ppo.PPOTrainer(config=trial_agent_config)
    check_range=iteration_num
    episodes_data = solara.utils.rllib.run_episodes_from_checkpoints(agent=agent,
                                                                     check_save_path=exp_path,
                                                                     check_range=check_range)
    # Unwrap single-episode results for convenience.
    if len(episodes_data) == 1:
        return episodes_data[0]
    else:
        return episodes_data

def get_experiment_path(trials, obs_keys):
    """Return the first trial path whose name contains `obs_keys`."""
    exp_path = [trial_path for trial_path in trials.keys() if obs_keys in trial_path][0]
    return exp_path
# -

# +
# Plotting configuration

## Lines to draw in policy plot
POLICY_PLOT_CONF = {
    "selected_keys": ['load','pv_gen','energy_cont','net_load',
                      'charging_power','cost','price_threshold',
                      'actions'],
    "y_min":-1.3,
    "y_max":1.4,
    "show_grid":False,
}
# -

# +
exp_path = get_experiment_path(trials, obs_keys = "['load', 'pv_gen', 'battery_cont', 'time_step', 'cum_load', 'cum_pv_gen']")
episode_data = get_episode_data_from_checkpoint(exp_path, iteration_num=150)

solara.plot.pyplot.plot_episode(episode_data,title=None, **POLICY_PLOT_CONF)
plt.savefig(fname=PLOT_DIR + EXPERIMENT_NAME + "_plot_05_policy_iter150_no_grid_no_penalty_failure" + OUT_FORMAT,
            bbox_inches='tight')
# -
notebooks/exploratory/rdnfn_029_experiment_collection_05_hyperparameter_tuning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Collaborative filtering using Non-Negative Matrix Factorization (NMF)

import pandas as pd
import numpy as np
from numpy.linalg import norm, solve
import time
import matplotlib.pyplot as plt
import math

# ### Download the Movielens Dataset

# +
# # !curl -O http://files.grouplens.org/datasets/movielens/ml-100k.zip
# # !unzip ml-100k.zip  # if you dont have unzip: apt-get install unzip
# # !cd ml-100k/ml-100k
# -

# ### Create the Ratings Matrix

names = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv(r"./ml-100k/ml-100k/u.data", sep='\t', names=names)
df.head()

# We see that there are 943 users and 1682 items

# FIX: the original used np.empty, which leaves every unrated (user, item)
# cell uninitialized garbage; the sparsity computation and the factorization
# below both assume missing ratings are exactly 0, so start from zeros.
# NOTE(review): assumes user_id/item_id are contiguous 1..n (true for ml-100k).
ratings_matrix = np.zeros((df.user_id.nunique(), df.item_id.nunique()), dtype=np.float32)
# Vectorized fill via integer-array indexing (replaces the original per-row
# df.iloc loop, which is extremely slow for 100k rows).
ratings_matrix[df.user_id.to_numpy() - 1, df.item_id.to_numpy() - 1] = df.rating.to_numpy()

print ('number of users = {}'.format(df.user_id.nunique()))
print ('number of items = {}'.format(df.item_id.nunique()))

# We see that the dataset contains many empty entries

zeroentries = (1 - np.count_nonzero(ratings_matrix)
               / (ratings_matrix.shape[0]*ratings_matrix.shape[1]))*100
print ('Zero entries = {:4.2f}%'.format(zeroentries))

# We see that most of the entries in the ratings matrices are zeros

# ### We develop a simple class for NMF with multiplicative updates
# (The original heading said "ALS"; the update rule implemented below is the
# Lee & Seung multiplicative rule, not alternating least squares.)
#
# Our objective is to find $W$ and $H$ such that,
# $$ R \approx WH$$
# where $R$ is the ratings matrix and $W$, and $H$ are the latent factor matrices for users and items respectively.
#
# We follow [1], and use the following multiplicative update rule:
# $$ H_{a \mu} = H_{a \mu} \frac{(W^{T} R)_{a \mu}} {(W^{T} W H)_{a \mu}}$$
# and,
# $$ W_{ia} = W_{ia} \frac{(RH^{T})_{ia}}{(WHH^{T})_{ia}} $$
#
# This minimizes the Euclidean distance, $||R - WH||$. Strictly speaking, under this update rule the Euclidean distance is non-increasing. We can also employ another multiplicative update rule under which KL divergence (i.e. $D(R || WH)$) is non-increasing.
#
# [1] Lee, D. D., and H. S. Seung. "Algorithms for non-negative matrix factorization." Advances in neural information processing systems. 2001

# +
class NMF:
    def __init__(self):
        """A simple class for matrix factorization with NMF multiplicative updates."""
        self.n_factors = 0  # number of latent factors

    def set_factors(self, n_factors):
        """Set the number of latent factors; returns self for chaining."""
        self.n_factors = n_factors
        return self

    def set_initial_factor_matrices(self, nusers, nitems):
        """Randomly initialise W (nusers x k) and H (k x nitems); returns self."""
        self.W = np.random.random((nusers, self.n_factors)).astype(np.float32)
        self.H = np.random.random((self.n_factors, nitems)).astype(np.float32)
        return self

    def get_n_factors(self):
        """Return the number of latent factors."""
        return self.n_factors

    def get_factor_matrices(self):
        """Return the (W, H) factor matrices."""
        return self.W, self.H

    def get_loss(self, R):
        """Return the Frobenius norm of the reconstruction error, ||R - WH||.

        (The original docstring called this the "MSE loss"; it is the
        unnormalised Frobenius norm.)
        """
        return norm(R - np.matmul(self.W, self.H))

    def update(self, R):
        """Apply one Lee & Seung multiplicative update to H, then W.

        A tiny epsilon guards the element-wise divisions: entries of W/H can
        decay to zero over many iterations, which would otherwise yield
        0/0 = NaN and poison the factorization.
        """
        eps = 1e-12
        # update H
        nr = np.matmul(self.W.T, R)
        dr = np.matmul(np.matmul(self.W.T, self.W), self.H)
        self.H = self.H * np.divide(nr, dr + eps)
        # update W
        nr = np.matmul(R, self.H.T)
        dr = np.matmul(np.matmul(self.W, self.H), self.H.T)
        self.W = self.W * np.divide(nr, dr + eps)
# -

# +
# number of latent factors
n_factors = 40

# construct the NMF object
nmf = NMF().set_factors(n_factors)\
           .set_initial_factor_matrices(ratings_matrix.shape[0],
                                        ratings_matrix.shape[1])

# set epochs
epochs = 1000

W_norm = []
H_norm = []
L = []

# training
W, H = nmf.get_factor_matrices()
t0 = time.time()
for i in range(epochs):
    # update W, H (update() rebinds self.W/self.H, so the old references in
    # W/H still hold the previous iterates for the convergence diagnostics)
    nmf.update(ratings_matrix)
    W_new, H_new = nmf.get_factor_matrices()
    # gather stats for loss
    L.append(nmf.get_loss(ratings_matrix))
    W_norm.append(norm((W_new - W)))
    H_norm.append(norm((H_new - H)))
    W, H = W_new, H_new
print("Time taken for the run = {:5.2f} s".format(time.time()-t0))
# -

# ### We plot the loss function as well as the convergence of factor matrices (W, H)

plt.plot(np.log(W_norm), 'o', np.log(H_norm), 's')
plt.xlabel('# iterations')
plt.ylabel('$Log ||W_{n+1} - W_{n}||_{2}$', fontsize=20)
plt.legend(['W','H'])
plt.show()

# Per-entry-normalised loss (Frobenius norm divided by matrix size).
mseL = np.array(L)/(ratings_matrix.shape[0]*ratings_matrix.shape[1])
plt.plot(mseL, 'o')
plt.xlabel('# iterations')
plt.ylabel('$||R - WH||_{2}$', fontsize=20)
plt.show()

# ##
Simple_NMF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Displaying Surfaces
# py3Dmol supports the following surface types:
#
# * VDW - van der Waals surface
# * MS - molecular surface
# * SES - solvent excluded surface
# * SAS - solvent accessible surface

import py3Dmol

# # Add surface
# In the structure below (HLA complex with antigen peptide pVR), we add a solvent excluded surface (SES) to the heavy chain to highlight the binding pocket for the antigen peptide (rendered as spheres).

# +
# Fetch the complex straight from the PDB.
view = py3Dmol.view(query='pdb:5XS3')

# Atom selections for the three chains of the complex.
selections = {
    'heavy': {'chain': 'A'},
    'light': {'chain': 'B'},
    'antigen': {'chain': 'C'},
}

# Cartoon rendering for the two protein chains, spheres for the peptide.
view.setStyle(selections['heavy'], {'cartoon': {'color': 'blue'}})
view.setStyle(selections['light'], {'cartoon': {'color': 'yellow'}})
view.setStyle(selections['antigen'], {'sphere': {'colorscheme': 'orangeCarbon'}})

# Semi-transparent solvent-excluded surface on the heavy chain only,
# which exposes the peptide-binding pocket.
view.addSurface(py3Dmol.SES, {'opacity': 0.9, 'color': 'lightblue'}, selections['heavy'])

view.show()
# -
1-3D-visualization/4-Surfaces.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/sergio-cabrales/python/blob/main/SP500.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="5vit6UQzdmhW" outputId="8d71c1ad-c2dc-4498-d579-dd39e48c5a59" pip install fix_yahoo_finance # + colab={"base_uri": "https://localhost:8080/"} id="HJz1divReJbb" outputId="9243e6ed-6187-429c-a7cd-88cbdd064e77" pip install yfinance # + id="jYRIkI8sXdXI" import bs4 as bs import datetime as dt import os from pandas_datareader import data as pdr import pickle import requests import fix_yahoo_finance as yf yf.pdr_override() SP500 = [] def save_sp500_tickers(): resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies') soup = bs.BeautifulSoup(resp.text, 'lxml') table = soup.find('table', {'class': 'wikitable sortable'}) tickers = [] for row in table.findAll('tr')[1:]: ticker = row.findAll('td')[0].text.replace('.', '-') ticker = ticker[:-1] tickers.append(ticker) with open("sp500tickers.pickle", "wb") as f: pickle.dump(tickers, f) return tickers SP500 = save_sp500_tickers() # + colab={"base_uri": "https://localhost:8080/"} id="4NLPJL-qfw9u" outputId="61f69f9e-535f-44db-831b-ae5c22554ae9" SP500
SP500.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow IO Authors.

# + cellView="form" colab_type="code" id="tuOe1ymfHZPu" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# + [markdown] colab_type="text" id="qFdPvlXBOdUN"
# # Azure blob storage with TensorFlow

# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
#   <td>
#     <a target="_blank" href="https://www.tensorflow.org/io/tutorials/azure"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://colab.research.google.com/github/tensorflow/io/blob/master/docs/tutorials/azure.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://github.com/tensorflow/io/blob/master/docs/tutorials/azure.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
#   </td>
#   <td>
#     <a href="https://raw.githubusercontent.com/tensorflow/io/master/docs/tutorials/azure.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
#   </td>
# </table>

# + [markdown] id="MsyhTesh4LSy" colab_type="text"
# Caution: In addition to python packages this notebook uses `npm install --user` to install packages. Be careful when running locally.
#

# + [markdown] colab_type="text" id="xHxb-dlhMIzW"
# ## Overview
#
# This tutorial shows how to use read and write files on [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) with TensorFlow, through TensorFlow IO's Azure file system integration.
#
# An Azure storage account is needed to read and write files on Azure Blob Storage. The Azure Storage Key should be provided through environmental variable:
# ```
# os.environ['TF_AZURE_STORAGE_KEY'] = '<key>'
# ```
#
# The storage account name and container name are part of the filename uri:
# ```
# azfs://<storage-account-name>/<container-name>/<path>
# ```
#
# NOTE(review): the uri scheme shown above is `azfs://` but the code cell
# below uses `az://` -- confirm which scheme(s) the installed tensorflow-io
# version registers.
#
# In this tutorial, for demo purposes we also provide the optional setup of [Azurite](https://github.com/Azure/Azurite) which is an Azure Storage emulator. With the Azurite emulator it is possible to read and write files through the Azure blob storage interface with TensorFlow.

# + [markdown] colab_type="text" id="MUXex9ctTuDB"
# ## Setup and usage

# + [markdown] id="upgCc3gXybsA" colab_type="text"
# ### Install required packages, and restart runtime

# + id="uUDYyMZRfkX4" colab_type="code" colab={}
try:
    # %tensorflow_version 2.x
except Exception:
    pass

# !pip install tensorflow-io

# + [markdown] id="yZmI7l_GykcW" colab_type="text"
# ### Install and setup Azurite (optional)
#
# In case an Azure Storage Account is not available, the following is needed to install and setup Azurite that emulates the Azure Storage interface:

# + id="YUj0878jPyz7" colab_type="code" colab={}
# !npm install azurite@2.7.0

# + id="KXbiNLKY4kNM" colab_type="code" colab={}
# Run `azurite-blob -s` as a background process.
# IPython doesn't recognize `&` in inline bash cells.
get_ipython().system_raw('azurite-blob -s &')

# + [markdown] id="acEST3amdyDI" colab_type="text"
# ### Read and write files to Azure Storage with TensorFlow
#
# The following is an example of reading and writing files to Azure Storage with TensorFlow's API.
#
# It behaves the same way as other file systems (e.g., POSIX or GCS) in TensorFlow once `tensorflow-io` package is imported, as `tensorflow-io` will automatically register `azfs` scheme for use.
#
# The Azure Storage Key should be provided through `TF_AZURE_STORAGE_KEY` environmental variable. Otherwise `TF_AZURE_USE_DEV_STORAGE` could be set to `True` to use Azurite emulator instead:
#

# + id="ZIrXoXgYlsj_" colab_type="code" colab={}
import os
import tensorflow as tf
import tensorflow_io as tfio

# Switch to False to use Azure Storage instead:
use_emulator = True

if use_emulator:
    os.environ['TF_AZURE_USE_DEV_STORAGE'] = '1'
    account_name = 'devstoreaccount1'
else:
    # Replace <key> with Azure Storage Key, and <account> with Azure Storage Account
    os.environ['TF_AZURE_STORAGE_KEY'] = '<key>'
    account_name = '<account>'

# + id="h21RdP7meGzP" colab_type="code" colab={}
# Create a directory (container path) on the blob store, then round-trip a file.
pathname = 'az://{}/aztest'.format(account_name)
tf.io.gfile.mkdir(pathname)

filename = pathname + '/hello.txt'
with tf.io.gfile.GFile(filename, mode='w') as w:
    w.write("Hello, world!")

with tf.io.gfile.GFile(filename, mode='r') as r:
    print(r.read())

# + [markdown] id="zF8IKV7phkIU" colab_type="text"
# ## Configurations
#
# Configurations of Azure Blob Storage in TensorFlow are always done through environmental variables. Below is a complete list of available configurations:
#
# - `TF_AZURE_USE_DEV_STORAGE`:
#   Set to 1 to use local development storage emulator for connections like 'az://devstoreaccount1/container/file.txt'. This will take precedence over all other settings so `unset` to use any other connection
# - `TF_AZURE_STORAGE_KEY`:
#   Account key for the storage account in use
# - `TF_AZURE_STORAGE_USE_HTTP`:
#   Set to any value if you don't want to use https transfer. `unset` to use default of https
# - `TF_AZURE_STORAGE_BLOB_ENDPOINT`:
#   Set to the endpoint of blob storage - default is `.core.windows.net`.
#
docs/tutorials/azure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="n65CoS3YRB1L" # ## autodiff32 Documentation # + [markdown] colab_type="text" id="jOggaEsmRB1Q" # ## Introduction # # Differentiation, or the process of finding a derivative, is an extremely important mathematical operation with a wide range of applications. The discovery of extrema or zeros of functions is essential in any optimization problem, and the solving of differential equations is fundamental to modern science and engineering. Differentiation is essential in nearly all quantitative disciplines: physicists may take the derivative of the displacement of a moving object with respect to time in order to find the velocity of that object, and data scientists may use derivatives when optimizing weights in a neural network. # # Naturally, we would like to compute the derivative as accurately and efficiently as possible. Two classical methods of calculating the derivative have clear shortcomings. Symbolic differentiation (finding the derivative of a given formula with respect to a specified variable, producing a new formula as its output) will be accurate, but can be quite expensive computationally. The finite difference method ($\frac{\partial f}{\partial x} = \frac{f(x+\epsilon)-f(x)}{\epsilon}$ for some small $\epsilon$) does not have this issue, but will be less precise as different values of epsilon will give different results. This brings us to automatic differentiation, a less costly and more precise approach. # # __Extension:__ For this project, we have implemented forward-mode automatic differentiation as well as reverse-mode automatic differentiation. It is important to have both methods accessible to have accuracy as well as optimal efficiency. 
# # Automatic differentiation can be used to compute derivatives to machine precision of functions $f:\mathbb{R}^{m} \to \mathbb{R}^{n}$ # # The forward mode is more efficient when $n\gg m$. # - This corresponds to the case where the number of functions to evaluate is much greater than the number of inputs. # - Actually computes the Jacobian-vector product $Jp$. # # The reverse mode is more efficient when $n\ll m$. # - This corresponds to the case where the number of inputs is much greater than the number of functions. # - Actually computes the Jacobian-transpose-vector product $J^{T}p$. # + [markdown] colab_type="text" id="wktePkP2RB1R" # ## Background # # Automatic differentiation breaks down the main function into elementary functions, evaluated upon one another. It then uses the chain rule to update the derivative at each step and ends in the derivative of the entire function. # # To better understand this process, let's look at an example. Consider the example function # # \begin{equation} # f(x) = x + 4sin(\frac{x}{4}) # \end{equation} # # We would like to compute the derivative of this function at a particular value of x. Let's say that in this case, we are interested in evaluating the derivative at $x=\pi$. In other words, we want to find $f'(\pi)$ where $f'(x) = \frac{\partial f}{\partial x}$ # # We know how to solve this _symbolically_ using methods that we learned in calculus, but remember, we want to compute this answer as accurately and efficiently as possible, which is why we want to solve it using automatic differentiation. # # ### The Chain Rule # # To solve this using automatic differentiation, we need to find the decomposition of the differentials provied by the chain rule. Remember, the chain rule is a formula for computing the derivative of the composition of two or more functions. 
So if we have a function $h\left(u\left(t\right)\right)$ and we want the derivative of $h$ with respect to $t$, then we know by the chain rule that the derivative is $\dfrac{\partial h}{\partial t} = \dfrac{\partial h}{\partial u}\dfrac{\partial u}{\partial t}.$ The chain rule can also be expanded to deal with multiple arguments and vector inputs (in which case we would be calculating the _gradient)_. # # Our function $f(x)$ is composed of elemental functions for which we know the derivatives. We will separate out each of these elemental functions, evaluating the derivative at each step using the chain rule. # # ### Forward-mode differentiation # # Using forward-mode differentiation, the evaluation trace for this problem looks like: # # | Trace | Elementary Operation | Derivative | $f'(a)$ | # | :------: | :----------------------: | :------------------------------: | :------------------------------: | # | $x_{3}$ | $\pi$ | $1$ | $\pi$ | $1$ | # | $x_{0}$ | $\frac{x_{3}}{4}$ | $\frac{\dot{x}_{3}}{4}$ | $\frac{1}{4}$ | # | $x_{1}$ | $\sin\left(x_{0}\right)$ | $\cos\left(x_{0}\right)\dot{x}_{0}$ | $\frac{\sqrt{2}}{8}$| # | $x_{2}$ | $4x_{1}$ | $4\dot{1}_{3}$ | $\frac{\sqrt{2}}{2}$ | # | $x_{4}$ | $x_{2} + x_{3}$| $\dot{x}_{2} + \dot{x}_{3}$ | $1 + \frac{\sqrt{2}}{2}$ | # # This evaluation trace provides some intuition for how forward-mode automatic differentiation is used to calculate the derivative of a function evaluated at a certain value ($f'(\pi) = 1 + \frac{\sqrt{2}}{2}$). # # <img src="computational_graph.png"></img> # *Figure 1: Forward-mode computational graph for example above* # # ### Reverse-mode automatic differentiation # # Using reverse-mode differentiation, sometimes called backpropagation, the trace on the right is evaluated top to bottom intsead. 
#
# | Trace | Elementary Operation | Derivative | $f'(a)$ |
# | :------: | :----------------------: | :------------------------------: | :------------------------------: |
# | $x_{3}$ | $\pi$ | $\frac{\partial{x_0}}{\partial{x_3}}\bar{x_0} + \bar{x_4} = \cos(\frac{\pi}{4}) + 1$ | $1 + \frac{\sqrt{2}}{2}$ |
# | $x_{0}$ | $\frac{x_{3}}{4}$ | $\frac{\partial{x_1}}{\partial{x_0}}\bar{x_1} = 4\cos(\frac{\pi}{4})$ | $2\sqrt{2}$ |
# | $x_{1}$ | $\sin\left(x_{0}\right)$ | $\frac{\partial{x_2}}{\partial{x_1}}\bar{x_2}$| $4$|
# | $x_{2}$ | $4x_{1}$ | $\frac{\partial{x_4}}{\partial{x_2}}\bar{x_4}$ | $1$ |
# | $x_{4}$ | $x_{2} + x_{3}$ | $1$ | $1$ |
#
# <img src="reverse_graph.png"></img>
# *Figure 2: Reverse-mode computational graph for example above*
#
# You may notice that when we computed the derivative above, we "seeded" the derivative with a value of 1. This seed vector doesn't have to be 1, but the utility of using a unit vector becomes apparent when we consider a problem involving directional derivatives.
# # The definition of the directional derivative (where $p$ is the seed vector) # $$D_{p}x_{3} = \sum_{j=1}^{2}{\dfrac{\partial x_{3}}{\partial x_{j}}p_{j}}$$ # # can be expanded to # # \begin{align} # D_{p}x_{3} &= \dfrac{\partial x_{3}}{\partial x_{1}}p_{1} + \dfrac{\partial x_{3}}{\partial x_{2}}p_{2} \\ # &= x_{2}p_{1} + x_{1}p_{2} # \end{align} # # If we choose $p$ to be a the unit vector, we can see how this is beneficial: # # $p = \left(1,0\right)$ gives $\dfrac{\partial f}{\partial x}$ # # $p = \left(0,1\right)$ gives $\dfrac{\partial f}{\partial y}$ # # So to summarize, the forward mode of automatic differentiation is really computing the _product of the gradient of our function with the seed vector:_ # # $$D_{p}x_{3} = \nabla x_{3}\cdot p.$$ # # If our function is a vector, then the forward mode actually computes $Jp$ where $J = \dfrac{\partial f_{i}}{\partial x_{j}}, \quad i = 1,\ldots, n, \quad j = 1,\ldots,m$ is the Jacobian matrix. Often we will really only want the "action" of the Jacobian on a vector, so we will just want to compute the matrix-vector product $Jp$ for some vector $p$. Using the same logic, the reverse mode actually computes the Jacobian-transpose-vector product $J^{T}p$. 
#
# Automatic differentiation can be used to compute derivatives to machine precision of functions $f:\mathbb{R}^{m} \to \mathbb{R}^{n}$

# + [markdown] colab_type="text" id="gss6Ih33RB1S"
# ## How to Use autodiff32
# ### Installation
#
# **1) Create a virtual environment (optional)**
#
# From the terminal, create a virtual environment:
#
# _(The command below will create the virtual environment in your present working directory, so consider moving to a project folder or a known location before creating the environment)_
#
# ```virtualenv env```
#
# activate the virtual environment:
#
# ```source env/bin/activate```
#
# if you plan to launch a jupyter notebook using this virtual environment, run the following to install and set up jupyter in your virtual environment:
#
# ```python -m pip install jupyter```
#
# ```python -m ipykernel install --user --name=env```
#
# **2) Install the autodiff32 package**
#
# In the terminal, type:
# ```pip install autodiff32```
# Package dependencies will be taken care of automatically!
#
# _(Alternatively, it is also possible to install the autodiff32 package by downloading this GitHub repository. If you choose that method, use the requirements.txt file to ensure you have installed all necessary dependencies.)_
#
# ## Tutorial
#
# It is easy to use the autodiff32 package in a Jupyter notebook, as we will demonstrate here:
#
# _(Alternatively, you can start a Python interpreter by typing ```Python``` into the terminal, or work from your favorite Python IDE.)_
#
# _(Remember, if you are using a virtual environment, follow steps 1 and 2 above and then type ```jupyter notebook``` into your terminal to launch a notebook. 
Inside the notebook, switch the kernel to that of your virtual environment.)_ # + colab={"base_uri": "https://localhost:8080/", "height": 108} colab_type="code" id="Vvv3m0TSq8Ok" outputId="65f3bf75-0b72-457b-e166-dd28a838b9a4" pip install autodiff32 # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="tIJeO9eWRB1T" outputId="c3ab1f31-70f2-446c-f257-a99fb079d994" import autodiff32 as ad import math # only necessary for this particular example """ Initialize an AutoDiff object with the number you would like to pass into your function """ X = ad.AutoDiff(math.pi) """ Define your function of interest """ func = X + 4*ad.sin(X/4) """ Look at the derivative of your function evaluated at the number you gave above """ print("derivative:",func.der) """ Look at the value of your function evaluated at the number you gave: """ print("value:", func.val) # + [markdown] colab_type="text" id="ex7bVik4RB1X" # Notice that this is the same equation used in our example above: $f(x) = x + 4(sin(\frac{x}{4}))$. Just for fun, let's see if the derivative that we calculated in the evolution trace is the same as the result using autodiff32: # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="2M6ICTkJRB1X" outputId="f7501d7f-9fd6-4115-ade1-3d6c21cce5b0" print("autodiff32 derivative:", func.der) print("evolution trace derivative:", 1+math.sqrt(2)/2) # + [markdown] colab_type="text" id="3cIZFPDrRB1a" # We can see that the derivative calculated using autodiff32 is the same as the derivative calulated by walking manually through the evolution trace! # + [markdown] colab_type="text" id="JYu8Zz2QRB1a" # Now what if your function if interest has a **vector input** (X has more than one value)? 
In that case, use the following workflow: # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="ZL7V_sB3RB1b" outputId="50d94c68-0b37-4bf5-e65a-51c178b06955" import numpy as np X = ad.AutoDiff(np.array([1,2,3])) func = X**2 print("value:", func.val) print("derivative:",func.der) # + [markdown] colab_type="text" id="jyBCIUzxRB1d" # Notice that there are three values and three derivatives. This is because your function and its derivative have been evaluated at the three values you provided. # # + [markdown] colab_type="text" id="nXmtGVIcRB1e" # Now what if your function if interest is a **multivariate function** (has more than just an X variable)? In that case, use the following workflow: # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="gTuNzWAlRB1e" outputId="4395c0f8-5cd5-42cb-cc5e-6304583faf0d" X,Y = ad.Multi_AutoDiff_Creator(X = 2, Y = 4).Vars func = X**2 + 3*Y print("value:", func.val) print("derivative:",func.der) # + [markdown] colab_type="text" id="FMmUESrGRB1g" # Notice that the derivative has two values. This is the derivative of your function with respect to X and Y, evaluated at the values of X and Y you provided. # # + [markdown] colab_type="text" id="MVc8Y2YURB1h" # Now what if you actually have **multiple functions of interest**? In that case, use the following workflow: # + colab={"base_uri": "https://localhost:8080/", "height": 181} colab_type="code" id="yl03xRFKRB1h" outputId="775578fd-0b4c-4bcb-9553-386473c740ef" X,Y = ad.Multi_AutoDiff_Creator(X = 2, Y = 4).Vars func = np.array([X+Y, 2*X*Y]) # two functions! 
# get value and derivatives of function separately print("value of first function:", func[0].val) print("derivative of first function:",func[0].der) print("\nvalue of second function:", func[1].val) print("derivative of second function:",func[1].der) # return the Jacobian matrix J = ad.Jacobian(func) print("\nJacobian matrix:\n",J.value()) # + [markdown] colab_type="text" id="UjSQNe9IRB1k" # Notice that you have an additional option here! You can return the values and derivatives of the functions as you would normally (except that you indicate the index of the function when asking for the value or derivative), _or_ you can return the **Jacobian matrix** which contains the derivatives with respect to X and Y for each of the functions. # # + [markdown] colab_type="text" id="md5dfRqERB1k" # Now what if your function if interest is a **multivariate function AND it has vector inputs**? In that case, use the following workflow: # # _Please note that the workflow here is significantly different from the rest of the package!_ # # _This is due to the complexities of handling the derivatives of vector inputs for multivariate functions. We wanted to give you, the user, as much functionality as possible, even if that meant sacrificing a bit in user-friendliness._ # + colab={"base_uri": "https://localhost:8080/", "height": 145} colab_type="code" id="yMwFjLWdRB1l" outputId="e5ab6fb6-47a9-4cc0-f964-bd195167c5be" # For a single multivariate function evaluated at vector value inputs # define your variables and their vector values X = [1,2,3] Y = [2,3,3] Z = [3,5,3] W = [3,5,3] # put them together in a list, in the order they will be used in your function! VarValues = [X, Y, Z, W] # define your function # Vars[0] represents X, Vars[1] represents Y, etc. 
func = lambda Vars:3*Vars[0] + 4*Vars[1] + 4*Vars[2]**2 + 3*Vars[3] # find the values and derivatives Values, Derivatives = ad.MultiVarVector_AutoDiff_Evaluate(VarValues,func) print("values:\n", Values) print("\nderivatives:\n", Derivatives) # + [markdown] colab_type="text" id="GLBdpOrXRB1n" # Now what if you have **multiple multivariate functions of interest with vector inputs**? In that case, use the following workflow: # # _Please note that the workflow here is significantly different from the rest of the package!_ # # _This is due to the complexities of handling the derivatives of vector inputs for multivariate functions. We wanted to give you, the user, as much functionality as possible, even if that meant sacrificing a bit in user-friendliness._ # + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="SkxKjLRZRB1n" outputId="6f27bd72-4317-487b-e0ea-c09f071c783e" # For a single multivariate function evaluated at vector value inputs # define your variables and their vector values X = [1,2,3] Y = [2,3,3] Z = [3,5,3] W = [3,5,3] # put them together in a list, in the order they will be used in your function! VarValues = [X, Y, Z, W] # define your functions # Vars[0] represents X, Vars[1] represents Y, etc. 
func = lambda Vars:np.array([3*Vars[0] + 4*Vars[1] + 4*Vars[2]**2 + 3*Vars[3], # first function 5*Vars[0] + 6*Vars[1] + 7*Vars[2]**2 + 1*Vars[3]]) # second function # find the values and derivatives Values, Derivatives = ad.MultiVarVector_AutoDiff_Evaluate(VarValues,func) print("values:\n", Values) print("\nderivatives:\n", Derivatives) # + [markdown] colab_type="text" id="A_Th2N8rRVI-" # **Extension Usage Demo** # # + colab={"base_uri": "https://localhost:8080/", "height": 90} colab_type="code" id="nbKFkDIAiveP" outputId="4b09d34c-9fe5-42a6-ccbb-a48db5cce19c" # For univariate /Multivariate Scalar Function Graph = ad.ComputationalGraph() X = ad.Node(value = 3, Graph = Graph) Y = ad.Node(value = 4, Graph = Graph) Z = ad.Node(value = 1, Graph = Graph) G = 2*X + 3*Y*Z + 2*Z Graph.ComputeValue() Graph.ComputeGradient(-1) print("Derivative for X is: ",X.deri) print("Derivative for Y is: ",Y.deri) print("Derivative for Z is: ",Z.deri) print("Value of Function is: ",G.value) # + colab={"base_uri": "https://localhost:8080/", "height": 126} colab_type="code" id="ZVAr5ESTFiAi" outputId="afa22b98-fb3f-4f65-aa12-2515d3c12617" #For univariate or Multivariate Vector Functions with single value import numpy as np Graph = ad.ComputationalGraph() X = ad.Node(value = 3, Graph = Graph) Y = ad.Node(value = 4, Graph = Graph) Z = ad.Node(value = 1, Graph = Graph) G = np.array([-2*ad.sinr(X), #please use sinr for sin operation on the node 2*Y + Z*Y, 3*X+3*Y*X+2*Z]) Func = ad.ReverseVecFunc(G,X =X,Y= Y,Z= Z) Value ,Derivative = Func.value(Graph) print("The value for the vector function is: ") print(Value) print("The derivative for the vector function is: ") print(Derivative) # + colab={"base_uri": "https://localhost:8080/", "height": 308} colab_type="code" id="wLRVyqUWGlTU" outputId="cd3adfdc-2c8e-42ca-9f0a-208e52350ca7" #SERIES OF VALUES For vector functions D = 3 # number of variables x = [1,2,3] y = [6,7,4] z = [3,8,1] Values = np.array([x,y,z]) G = np.array([-2*X, #please use sinr for 
sin operation on the node 2*Y + Z*Y, 3*X+3*Y*X+2*Z]) Func = ad.ReverseVecFunc(G,X =X,Y= Y,Z= Z) Vals,Deris=Func.Seriesvalue(Values,D,Graph) print("The value for the vector function is: ") print(Vals) print("The derivative for the vector function is: ") print(np.array(Deris)) # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="Rw54CVb4SfQV" outputId="ae3bb0e5-bd80-447e-f789-8407456d6262" import numpy as np Graph = ad.ComputationalGraph() X = ad.Node(value = 3, Graph = Graph) F = 3*X**2 Xvals= np.array([[3,2,4]]) #Please input Xvals as a 2 dimensional array Vals, deri = Graph.SeriesValues(Xvals,1,Graph) print(Vals) print(deri) # + [markdown] colab_type="text" id="ORwjEJWKRB1q" # ## Software Organization # ### Directory Structure # Our structure is as follows: # # /cs207-FinalProject # README.md # LICENSE # .gitignore # .travis.yml # setup.py # requirements.txt # docs/ # milestone1.ipynb # milestone2.ipynb # documentation.ipynb # autodif32/ # __init__.py # AutoDiffObj.py # Elementary.py # ElementaryReverse.py # Graph.py # JacobianVectorFunc.py # MultivariateVarCreator.py # MultivariateVectorAutoDiffEvaluate.py # MultivariateVectorVarCreator.py # ReverseJacobVectorFunc.py # ReverseMode.py # test/ # __init__.py # autodiffobj_test.py # elementary_test.py # Reverse_test.py # JacobianVectorFunc_test.py # # # ### Modules # # The ```AutoDiffObj``` module creates an AutoDiff object based on the scalar value you would like to evaluate a function and its derivative at. It overloads the basic operations including multiply, add, negative, subtract, powers, division, and equality. It also includes a Jacobian method, which returns the Jacobian of the function. If the function is univariate, then the Jacobian is just the derivative. If the function is multivariate (not yet implemented), then the Jacobian will be an array. 
# # The ```Elementary``` module implements some of the elementary functions for use in the forward mode of autodifferentiation, including exp, log, sqrt, sin, cos, tan, asin, acos, atan, sinh, cosh and tanh. # # The ```ElementaryReverse``` module implements some of the elementary functions for use in the reverse mode of autodifferentiation. These functions are made distinct from those in the Elementary module by an r at the end of the function name (expr, logr, sqrtr, sinr, cosr, tanr, asinr, acosr, atanr, sinhr, coshr and tanhr). # # The ```Graph``` module implements the ComputationalGraph class which allows each node to be recorded in sequence in a graph for later use in value and derivative computation. This module also implements the ComputeValue and ComputeGradient functions, which compute the value and gradient using reverse mode and leveraging the computational graph. # # The ```JacobianVectorFunc``` module implements the Jacobian method for vector inputs. # # The ```MultivariateVarCreator``` module takes in the values of each variable in a user defined multivariate function, and returns len(kwargs) number of AutoDiff class variables with derivative (seed) as an np.array. The MultivariateVarCreator class acts as a helper function for user to create multiple AutoDiff Objects conveniently (instead of manually create many AutoDiff objects) to use in the evaluation of the multivariate function. Please note that the implementation of multivariate functions is still in progress. # # The ```MultivariateVectorAutoDiffEvaluate``` module implements the MultiVarVector_AutoDiff_Evaluate function, which autodifferentiates and evaluates a multivariate function using forward mode at user defined vector input values. # # The ```MultivariateVectorVarCreator``` module implements the Multi_Vector_AutoDiff_Creator class, which instantiates multiple AutoDiff objects (for use in multivariate functions). 
# # The ```ReverseJacobVectorFunc``` implements the ReverseVecFunc class, which takes the vector function and variables as inputs, and computes the value and the Jacobian matrix using the reverse mode of automatic differentiation. # # The ```ReverseMode``` module implements the node class, which is a single AutoDiff Object used for reverse mode. # # # ### Testing Suite # Our testing files live in the `test/` directory. The tests are run using pytest. # # # + [markdown] colab_type="text" id="wh3FSEUQRB1r" # ### Installation procedure # # 1) For general users, install the autodiff32 package using pip (see 'How to Use autodiff32' above for complete instructions) # # ```pip install autodiff32``` # # 2) For developers, feel free to clone this repository, and use the requirements.txt file to ensure you have installed all necessary dependencies. # + [markdown] colab_type="text" id="fGgFKv5mRB1r" # ## Implementation details # # The current implementation of AutoDiff32 allows for scalar univariate inputs to functions. Core classes include the AutoDiff class, . AutoDiff32 is externally dependent on numpy, and this dependency has been automatically taken care of in the released version of the package (as well as in the requirements.txt file if the user chooses to manually download the package). AutoDiff32 has implemented a number of elementary functions, as listed above in the description of the basic modules. # # We plan to continue development of the AutoDiff32 package to allow for vector inputs as well as multivariate inputs. This will require robust Jacobian matrix functionality. # # In addition to the currently implemented forward mode, we plan to build out the reverse mode of auto differentiation as an advanced feature. 
# # # # ### Forward mode Details: # # The core data structure (and also external dependencies) for our implementation will be numpy arrays, and the core classes we have implemented are described below: # # 1) An ***AutoDiff class*** which stores the current value and derivative for the current node. The class method will conatin overloading operators such as plus, minus, multiply, etc. # # ```python # """ # Initialize an Automatic Differentiation object which stores its current value and derivative # # Note that the derivative needs to not be a scalar value # # For multivariable differentiation problems of a scalar function, the derivative will be a vector # """ # # def __init__(self,value,der=1) : # #Store the value of the Autodiff object # #Store derivative of autodiff object (default value is 1) # # # #overloading operators to enable basic operations between classes and numbers (not exhaustive): # # """ # These methods will differentiate cases in which other is a scalar, a vector, a class, or any child class # # All of these operators will return AutoDiff classes # """ # def __mult__(self,other): # def __rmult__(self,other): # def __radd__(self,other): # def __add__(self,other): # # # ``` # # Multivariate functions (both scalar and vector) can be also evaluated using the AutoDiff class as below, and the resulting Jacobian will be a vector array: # # ```python # X = AutoDiff(1,np.array([1,0])) # Y = AutoDiff(1,np.array([0,1])) # # func = X + 2*Y # ``` # # While this way of defining and evaluating multivariate functions is feasible, it is very inconvenient for the users to have to keep track of the dimensionality of the derivatives. For,example, the above func definition will raise an error if Y's derivative is defined as np.array([0,0,1]). This potential problem in dimensionality will also be a cause difficulties in the error handling process in the code. 
# # The way we tackle this problem is to create a helper class called **Multi_AutoDiff_Creator** as described below: # # 2) A ***Multi_AutoDiff_Creator class*** which helps the user create variable objects from a multivariable function # # ```python # # """ # This class helps users initialize different variables from a multivariable function without explicitly specifying them using separate AutoDiff classes # # It will need to import AutoDiff Class # """ # def __init__(self,*args,**kwargs): # ''' # INPUT : variables as kwargs such as (X=3,Y =4) # RETURN : X number of autodiff objects with length kwargs and each with its 'vector' derivatives # # ''' # #Demo and comparison # # '''initiate a multivariable function using Multi_AutoDiff_Creator class''' # X,Y= Multi_AutoDiff_Creator(X = 1., Y=3.).Vars #X,Y are autodiff object with derivative [1,0] and [0,1] # func = X + 2*Y*X # ``` # # Notice that this class only serves as a **helper class** to ensure that every variable created has the correct format of dimensionality. The class itself has no influence on the forward mode differentation process. # # For better calculation of the derivatives of our elementary functions, we also introduce our elementary function methods. 
# # 3) An ***Elementary function*** file which calculate derivatives for elementary functions as described previously # ```python # #Elementary Function Derivative (not exhaustive): # def exp(self,ADobj): # def tan(self,ADobj): # def sin(self,ADobj): # def cos(self.ADobj): # ''' # RETURN an AutoDiff object if ADobj is an AutoDiff object with related value and derivative for the particular elementary functions # Return np.exp/tan/sin(ODobj) if ODobj is a number # ''' # # ``` # # 4) A ***Jacobian*** class which helps the user compute the Jacobian matrix for vector functions evaluated at a single point # ```python # class Jacobian: # def __init__(self,vector_func): # #The Jacobian class is initiated by a vector function # # def value(self): # # Return the Jacobian value of the vector functions evaluated at a single point by looping through the vector function # ''' # x, y = ad.Multi_AutoDiff_Creator(x=2, y=3).Vars # #Define a vector function # func = np.array([x+y, 2*x*y]) # Jacob = ad.Jacobian(func) # print(Jacob.value()) # this class method output the full jacobian matrix as an np array # ''' # ``` # # + [markdown] colab_type="text" id="EtfrL-oGRB1s" # ## Extension # # + [markdown] colab_type="text" id="G7Pw7-62mACM" # **Description** # # The implementation of reverse mode also lies in the same autodiff32 package with no additional extension requirements. # # We implemented the reverse mode of automatic differentiation in which users can evaluate univariate and multivariate scalar/vector functions with series or single values. The alogorithm is based on the computational flow chart (graph) for reverse mode discussed in class, in which each node is recorded in sequence in a Graph for later use in value and derivative computation. In particular, each node stores its value, the graph it connects to, and the index it has in the graph. 
In order to backward compute the derivative for our root node, each node records the parent which produced it through some operations, and also the operation type ("plus","sub",etc.) itself.
#
# We find this more intuitive and pedagogical than using recursion, and potentially can be more computationally efficient since each node only has to remember its direct children, but not the indirect ones. The graph and node can also be reused easily, which makes it simpler and more computationally efficient in our evaluation for multiple values of our variables.
#
# The high level mathematical ideas will be similar to that of the forward mode, the only additional thing that will be helpful to keep in mind is the chain rule, which is:
#
# \begin{aligned}
# \frac{dz}{dx} = \frac{dz}{dy}\times\frac{dy}{dx}
# \end{aligned}
#
# This will help us understand how we can calculate the derivatives by starting to set a seed value of 1 for our function, which is:
# \begin{aligned}
# \frac{dz}{df} = 1
# \end{aligned}
# For example, if we would like to calculate the derivative for $f = 2x$ backward, by letting $z=f$, we will have:
#
# \begin{aligned}
# \frac{dz}{dx}=\frac{dz}{df}\times\frac{df}{dx} = 1 \times 2 = 2
# \end{aligned}
#
#

# + [markdown] colab_type="text" id="oOzf0Yfo9Tqy"
# ### Implementation Details for Reverse Mode
#
# The reverse mode implementation will have the following classes:
#
# 1) A **Node** class which serves as a single automatic differentiation object. All the nodes will be connected to the same graph for a given function.
# ```python
# class Node:
#     #INITIATOR
#     '''
#     The Node class is initiated with the parameters mentioned below. The node
#     will connect to the graph as soon as it is created.
#     '''
#     def __init__(values,Graph,leftparentnode,rightparentnode,derivative = 0):
#
#
#     def CheckConstant(x):
#         #Check if x is a Node or not. If it is, return it,
#         # if not, return a new node with Node.value = x, and connect it to the Graph. 
# # #OVERLOADING OPERATORS # ''' # perform subtraction between nodes # Return # ====== # A new node which store the value,the graph it connects to, and self as its left nod and other as its right node. # ''' # def __mul__(self,other): # def __sub__(self,pther): # def __truediv__(self,other): # # # ``` # # 2 A **ComputationalGraph** class which stores the nodes of a given function in sequence and compute the value and the gradient of the function and the root variables. # # ```python # # class ComputationalGraph: # # def __init__(self): # # the graph is initialized by an empty list # def append(self): # #append the node in sequence in the graph and record its index for later computation # def ValidOp(self): # # sturcturely store every valid operator in this reverse mode computation # ''' # RETURN # ====== # Valid operator code if valid otherwise raise error # ''' # def ComputeValue(self): # #Compute the Value of the function by a forward pass of th graph # # def ComputeGradient(self,lastindex = -1): # ''' # Backward propagate through the graph to calcutate the derivative of the nodes and store it in each node by looping through the list in a reverse order and update the parent nodes using child nodes. 
# INPUT : Last index is the seed : dz/df = 1 # # RETURN # ====== # NONE # All the values after computation is stored in each node in the list # ''' # def SeriesValues(self,args): # #compute the value and derivatives for # #a series of values for a function (illustration in detail in the How to use session) # ''' # RETURN # === # A two dimensional Array for derivatives and one dimensional array for values # Values for the funtion evaulated at different points # Derivatives of root variables evaluated at different points # ''' # # ``` # # 3) A class **ReverseVecFunc** which calculate the value and derivative for vector functions evaluated at different points # ```python # # class ReverseVecFunc: # # def __init__(self): # #it stores the variables and the functions when the class is initialized. # def value(self,Graph): # #it computes the Jacobian and the value for vector functions for a given single value of variable # def SeriesValues(self,values,dimension,Graph): # ''' # INPUT # ===== # value = values of the functions # dimension : the number of variables # Graph : the graph to be connected # RETURN # ====== # The value and the jacobian (both in 2D nparrays) of the function at a series of values # # # ''' # #pseudocodes # initialize a valuelist = [] # for each function in vection functions: # calculate its gradients and values using Wrapper(args) # append it to a valuelist # # def Wrapper(args): # # A helper function to help calculate the values and derivative for a series values # ''' # INPUT # ===== # value = values of the functions # dimension : the number of variables # Graph : the graph to be connected # Returns the derivatives and values of a single function evaluated different values # ''' # # ``` # 4) Additional **ElementaryReverse.py** which defines the elementary functions operations for the Node objects # ```python # # ''' # NOTE # ===== # In order to differentiate between reverse mode elementary functions and forward mode ones, all the elementary 
function in reverse mode will have a "r" as the last alphabet. # # If user would like to use these function with a constant , please use built in functions in Numpy: such as : np.exp(2), np.sin(3) etc. # # RETURN # ===== # New Node object that stores the value after related computation and its relevant leftparent node (which is itself). The node returned wont have any rightparent node. # # ''' # def expr(x): # def logr(x): # def sqrtr(x): # ... # ``` # # + [markdown] colab_type="text" id="3QMX-moZIKJ4" # ## Future Features # As we have seen, automatic differentiation is an efficient and accurate way to compute derivatives, so it makes sense # to try to apply this where we can. One of the most popular methods that use the derivative is gradient descent. # Gradient descent is an optimization problem where we try to minimize some function. We do this by taking some starting point # and moving in the direction # of the steepest part of the function, or the most negative gradient. To find this minimum point, it makes sense that # want the gradient to be precise, so the point is precise, so automatic differentiation is the natural choice. # # Building onto that idea, another use of this could be in neural networks. The entire concept of a neural network # is centered around optimizing its weights, and at each step and each time the node weights update, we need to calculate # lots of partial derivatives, so once again, it makes sense to use automatic differentiation. # Similarly, the concept of deep learning is essentially a large collection of neural networks, so the same utility # from automatic differentiation can be gained here. # # Another interesting application can be found in statistics, namely, in a Markov chain Monte Carlo sampling method, called # the Hamiltonian Monte Carlo Algorithm. Without diving into deep statistical explanations, this is an algorithm to create a # random sample from a probability distribution that is difficult to get normally. 
Most Markov chain Monte Carlo algorithms
# converge quite slowly, and as a result explore the sampling space slowly as well. The Hamiltonian algorithm
# does a better job with convergence, but at the cost of having to evaluate complicated gradients of probability models along the way.
# However, with automatic differentiation, the user no longer has to manually derive the gradients, and this cost can be reduced significantly.
#
# ref: http://jmlr.org/papers/volume18/17-468/17-468.pdf
docs/documentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 0.059677, "end_time": "2022-04-01T13:40:33.417593", "exception": false, "start_time": "2022-04-01T13:40:33.357916", "status": "completed"} tags=[] # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + _kg_hide-input=true papermill={"duration": 35.692492, "end_time": "2022-04-01T13:41:09.131214", "exception": false, "start_time": "2022-04-01T13:40:33.438722", "status": "completed"} tags=[] pip install talos # + papermill={"duration": 6.24413, "end_time": "2022-04-01T13:41:15.422120", "exception": false, "start_time": "2022-04-01T13:41:09.177990", "status": "completed"} tags=[] import pandas as pd import numpy as np import seaborn as sns from sklearn import linear_model from sklearn.model_selection import train_test_split from matplotlib import pyplot as plt from scipy import 
stats from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import scale # keres modules from keras import regularizers from keras.models import Sequential, load_model from keras.layers import Dense, Activation, Dropout from keras.layers import Flatten, Conv1D, MaxPooling1D from keras.activations import relu, elu, linear, softmax from keras.callbacks import EarlyStopping, Callback from keras.wrappers.scikit_learn import KerasRegressor from tensorflow.keras.optimizers import Adam # - Works from keras.losses import mean_squared_error, categorical_crossentropy, logcosh from keras.utils.np_utils import to_categorical # + [markdown] papermill={"duration": 0.044466, "end_time": "2022-04-01T13:41:15.511459", "exception": false, "start_time": "2022-04-01T13:41:15.466993", "status": "completed"} tags=[] # **Matrix Y contains the average grain yield, column 1: Grain yield for environment 1 and so on.** # # **Matrix X contains marker genotypes.** # # + papermill={"duration": 0.242016, "end_time": "2022-04-01T13:41:15.799780", "exception": false, "start_time": "2022-04-01T13:41:15.557764", "status": "completed"} tags=[] # load data as a pandas dataframe X = pd.read_csv('/kaggle/input/genomicselection-data-weat/DATA/wheat.X', header=None, sep='\s+') Y = pd.read_csv('/kaggle/input/genomicselection-data-weat/DATA/wheat.Y', header=None, sep='\s+') # + papermill={"duration": 0.065012, "end_time": "2022-04-01T13:41:15.910213", "exception": false, "start_time": "2022-04-01T13:41:15.845201", "status": "completed"} tags=[] print(X.head(10)) print('#'*50) print(X.shape) # + papermill={"duration": 0.05698, "end_time": "2022-04-01T13:41:16.013790", "exception": false, "start_time": "2022-04-01T13:41:15.956810", "status": "completed"} tags=[] print(Y.head(10)) print('#'*50) print(Y.shape) # + papermill={"duration": 0.067176, "end_time": "2022-04-01T13:41:16.127499", "exception": false, "start_time": "2022-04-01T13:41:16.060323", 
"status": "completed"} tags=[] # data pattitioning into train and validation itrait=1 X_train, X_test, y_train, y_test = train_test_split(X, Y[itrait], test_size=0.2) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) # + papermill={"duration": 0.057582, "end_time": "2022-04-01T13:41:16.230439", "exception": false, "start_time": "2022-04-01T13:41:16.172857", "status": "completed"} tags=[] # print basic statistics: max, min, mean, sd print(' min max mean sd') print('Train: ', y_train.min(), y_train.max(), y_train.mean(), np.sqrt(y_train.var())) print('Test: ', y_test.min(), y_test.max(), y_test.mean(), np.sqrt(y_test.var())) # + papermill={"duration": 0.328059, "end_time": "2022-04-01T13:41:16.604973", "exception": false, "start_time": "2022-04-01T13:41:16.276914", "status": "completed"} tags=[] # basic histograms plt.title('train / test data') plt.hist(y_train, label='Train') plt.hist(y_test, label='Test') plt.legend(loc='best') plt.show() # + [markdown] papermill={"duration": 0.046499, "end_time": "2022-04-01T13:41:16.709172", "exception": false, "start_time": "2022-04-01T13:41:16.662673", "status": "completed"} tags=[] # **Marker PCA, use whole x with different color for train and test** # + papermill={"duration": 0.652708, "end_time": "2022-04-01T13:41:17.408713", "exception": false, "start_time": "2022-04-01T13:41:16.756005", "status": "completed"} tags=[] X = np.concatenate((X_train, X_test)) pca = PCA(n_components=2) p = pca.fit(X).fit_transform(X) Ntrain=X_train.shape[0] plt.title('PCA decomposition') plt.scatter(p[0:Ntrain,0], p[0:Ntrain,1], label='Train') plt.scatter(p[Ntrain:,0], p[Ntrain:,1], label='Test', color='orange') plt.legend(loc='best') plt.show() # + [markdown] papermill={"duration": 0.05036, "end_time": "2022-04-01T13:41:17.515429", "exception": false, "start_time": "2022-04-01T13:41:17.465069", "status": "completed"} tags=[] # **SNP preselection according to a simple GWAS** # # + papermill={"duration": 0.729986, "end_time": 
"2022-04-01T13:41:18.300455", "exception": false, "start_time": "2022-04-01T13:41:17.570469", "status": "completed"} tags=[] pvals = [] for i in range(X_train.shape[1]): b, intercept, r_value, p_value, std_err = stats.linregress(X_train[i], y_train) pvals.append(-np.log10(p_value)) pvals = np.array(pvals) # plot GWAS plt.ylabel('-log10 p-value') plt.xlabel('SNP') plt.plot(pvals, marker='o', color='red') plt.show() # select N_best most associated SNPs # N_best = X_train.shape[1] # all SNPs N_best = 100 snp_list = pvals.argsort()[-N_best:] # select by min P_value min_p_value = 2 snp_list = np.nonzero(pvals>min_p_value) # + [markdown] papermill={"duration": 0.048443, "end_time": "2022-04-01T13:41:18.399147", "exception": false, "start_time": "2022-04-01T13:41:18.350704", "status": "completed"} tags=[] # **Standard penalized methods** # **lasso using scikit-learn** # # + papermill={"duration": 0.239576, "end_time": "2022-04-01T13:41:18.687752", "exception": false, "start_time": "2022-04-01T13:41:18.448176", "status": "completed"} tags=[] # alpha is regularization parameter lasso = linear_model.Lasso(alpha=0.01) lasso.fit(X_train, y_train) y_hat = lasso.predict(X_test) # mean squared error mse = mean_squared_error(y_test, y_hat) print('\nMSE in prediction=', mse) # correlation btw predicted and observed corr = np.corrcoef(y_test,y_hat)[0,1] print('\nCorr obs vs pred =', corr) # + papermill={"duration": 0.262578, "end_time": "2022-04-01T13:41:19.036218", "exception": false, "start_time": "2022-04-01T13:41:18.773640", "status": "completed"} tags=[] # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') # plot observed vs predicted targets plt.title('Lasso: observed vs predicted Y') plt.ylabel('Predicted') plt.xlabel('Observed') plt.scatter(y_test, y_hat, marker='o', cmap='viridis', alpha=0.3) plt.show() # + [markdown] papermill={"duration": 0.050209, "end_time": "2022-04-01T13:41:19.152091", "exception": false, "start_time": 
"2022-04-01T13:41:19.101882", "status": "completed"} tags=[] # **Implements a standard fully connected neural network for quantitative targets** # + papermill={"duration": 0.057491, "end_time": "2022-04-01T13:41:19.259505", "exception": false, "start_time": "2022-04-01T13:41:19.202014", "status": "completed"} tags=[] # number of SNPs in data nSNP = X_train.shape[1] nSNP # + papermill={"duration": 2.134257, "end_time": "2022-04-01T13:41:21.444832", "exception": false, "start_time": "2022-04-01T13:41:19.310575", "status": "completed"} tags=[] # Instantiate model = Sequential() # add first layes model.add(Dense(64, input_dim=nSNP)) model.add(Activation('relu')) # add second layer model.add(Dense(32)) model.add(Activation('softplus')) #last, output layer model.add(Dense(1)) # Model Compiling (https://keras.io/models/sequential/) # compile(optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None) # Stochastic Gradient Descent (‘sgd’) as optimization algorithm # Mean Squared Error as loss, ie, quantitative variable, regression model.compile(loss='mean_squared_error', optimizer='sgd') # list some properties model.summary() #tarining ## fit(x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1) model.fit(X_train, y_train, epochs=20) # cross-validation: get predicted target values y_hat = model.predict(X_test, batch_size=128) mse_prediction = model.evaluate(X_test, y_test, batch_size=128) print('\MSE in prediction = ', mse_prediction) # correlation btw predicted and observed corr = np.corrcoef(y_test, y_hat[:,0])[0,1] print('\Corr obs vs pred =', corr) # plot observed vs predicted targets plt.title('MLP: observed vs predicetd Y') plt.ylabel('Predicted') plt.xlabel('Observed') plt.scatter(y_test, y_hat, marker='o') 
plt.show() # + [markdown] papermill={"duration": 0.062546, "end_time": "2022-04-01T13:41:21.571098", "exception": false, "start_time": "2022-04-01T13:41:21.508552", "status": "completed"} tags=[] # **Controlling overfit: regularization, dropout and early stopping** # + papermill={"duration": 1.638096, "end_time": "2022-04-01T13:41:23.272514", "exception": false, "start_time": "2022-04-01T13:41:21.634418", "status": "completed"} tags=[] # deletes current model del model model = Sequential() # Add l1 & l2 regularization in first layer model.add(Dense(64, input_dim=nSNP, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))) model.add(Activation('relu')) # Add second layer model.add(Dense(32)) model.add(Activation('softplus')) ## Adding dropout to second layer model.add(Dropout(0.2)) # Last, output layer model.add(Dense(1)) # Model Compiling (https://keras.io/models/sequential/) model.compile(loss='mean_squared_error', optimizer='sgd') # Split the train set into proper train & validation X_train0, X_val, y_train0, y_val = train_test_split(X_train, y_train, test_size=0.1) nEpochs=10 # Early stopping early_stopper = EarlyStopping(monitor='val_loss', patience=10, min_delta=0.01) model.fit(X_train0, y_train0, epochs=nEpochs, verbose=1, validation_data=(X_val, y_val), callbacks=[early_stopper]) # cross-validation mse_prediction = model.evaluate(X_test, y_test, batch_size=128) print('\nMSE in prediction =',mse_prediction) ## In this case neither l1 nor l2 regularization helps # + papermill={"duration": 0.071971, "end_time": "2022-04-01T13:41:23.415344", "exception": false, "start_time": "2022-04-01T13:41:23.343373", "status": "completed"} tags=[] # + papermill={"duration": 0.070583, "end_time": "2022-04-01T13:41:23.556974", "exception": false, "start_time": "2022-04-01T13:41:23.486391", "status": "completed"} tags=[] # + papermill={"duration": 0.072127, "end_time": "2022-04-01T13:41:23.700151", "exception": false, "start_time": 
"2022-04-01T13:41:23.628024", "status": "completed"} tags=[] # + papermill={"duration": 0.070869, "end_time": "2022-04-01T13:41:23.841883", "exception": false, "start_time": "2022-04-01T13:41:23.771014", "status": "completed"} tags=[]
Genomic Selection-with-Neural Network/gs-dl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fetch Ordnance Survey (Open) Data # # - Dowload files from OS OpenData API, save zip, open it in GeoPandas and dump to a PostGIS database. # - Process OS MasterMap Building height data # # To establish a connection to the database, this notebook is using docker environment variables. Start docker as `docker run -it -e DB_PORT=<port> -e DB_USER=<username> -e DB_PWD=<password> -e DB_HOST=<host> darribas/gds_dev:5.0`. # # + import os import zipfile import requests import fiona import geopandas as gpd from sqlalchemy import create_engine # - # ## OpenRoads # # ### Download from API # # Request the list of products available using Ordnance Survey API and filter `'OpenRoads'` record. response = requests.get('https://api.os.uk/downloads/v1/products') json = response.json() for product in json: if product['id'] == 'OpenRoads': url = product['url'] roads = requests.get(url) roads_json = roads.json() roads_json # Get download URLs. download = requests.get(roads_json['downloadsUrl']) download_json = download.json() download_json # Save zipped GeoPackage to disk. Since we do not know its contents, we can't read it directly to GeoPandas. for fileformat in download_json: if fileformat['format'] == 'GeoPackage': with open(fileformat['fileName'], "wb") as down: down.write(requests.get(fileformat['url']).content) down.close() # Explore the contents of the downloaded file. gpkg = zipfile.ZipFile(fileformat['fileName']) gpkg.namelist() # Read GeoPackage to GeoPandas. gdf = gpd.read_file('zip://' + fileformat['fileName'] + '!' 
+ gpkg.namelist()[1]) gdf.head() # ### Save GeoDataFrame to PostGIS user = os.environ.get('DB_USER') pwd = <PASSWORD>('<PASSWORD>') host = os.environ.get('DB_HOST') port = os.environ.get('DB_PORT') db_connection_url = f"postgres+psycopg2://{user}:{pwd}@{host}:{port}/built_env" engine = create_engine(db_connection_url) gdf.to_postgis("openroads_200803", engine, if_exists='replace') # ## OpenMap - Local (building layer + barriers) # ### Download from API # # Now we get `url` of OpenMap - Local product, from which we extract buidling footprints and layers forming morphological barriers. # for product in json: if product['id'] == 'OpenMapLocal': url = product['url'] print(url) # Unlike above, OpenMap comes in split into tiles. We want the whole GB, so we have to specify it as `area`. # # + openmap = requests.get(url) openmap_json = openmap.json() download = requests.get(openmap_json['downloadsUrl']) download_json = download.json() for tile in download_json: if tile['area'] == 'GB' and tile['format'] == 'GML': url = tile['url'] filename = tile['fileName'] print(tile) # - with open(filename, "wb") as down: down.write(requests.get(url).content) down.close() # ### Save files to PostGIS # # Because downloaded zip still containes data split into tiles, we will have to iterate over them and append them individually to the same table. # # gml = zipfile.ZipFile(filename) gml.namelist() # Since `gml` files come with more layers that we are looking for, let's explore them and pick only those of interest. # fiona.listlayers('zip://' + filename + '!' + gml.namelist()[-1]) # Finally, we can iterate over the contents of downloaded zip and store relevant data in our PostGIS table. Note the `if_exists='append` option, which ensures that we do not overwrite one tile with another. There are tiles covering only sea - those do not have wanted (e.g. `Building`) layer. 
# # #### Buildings for file in gml.namelist(): if file.startswith('data/'): if 'Building' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' + file, layer='Building') gdf.to_postgis("openmap_buildings_200814", engine, if_exists='append') else: print(file, 'does not contain Building layer.') # #### Railway for file in gml.namelist(): if file.startswith('data/'): if 'RailwayTrack' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' + file, layer='RailwayTrack') gdf.to_postgis("openmap_railwaytrack_200824", engine, if_exists='append') else: print(file, 'does not contain Railway layer.') # #### Surface Water for file in gml.namelist(): if file.startswith('data/'): if 'SurfaceWater_Area' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' + file, layer='SurfaceWater_Area') gdf.to_postgis("openmap_surfacewater_area_200824", engine, if_exists='append') else: print(file, 'does not contain SurfaceWater_Area layer.') if 'SurfaceWater_Line' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' + file, layer='SurfaceWater_Line') gdf.to_postgis("openmap_surfacewater_line_200824", engine, if_exists='append') else: print(file, 'does not contain SurfaceWater_Line layer.') # #### Woodland for file in gml.namelist(): if file.startswith('data/'): if 'Woodland' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' + file, layer='Woodland') gdf.to_postgis("openmap_woodland_200824", engine, if_exists='append') else: print(file, 'does not contain Woodland layer.') # #### Tidal water for file in gml.namelist(): if file.startswith('data/'): if 'TidalWater' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' 
+ file, layer='TidalWater') gdf.to_postgis("openmap_tidalwater_200908", engine, if_exists='append') else: print(file, 'does not contain TidalWater layer.') # #### Tidal Boundary # # Showing high water mark (coinciding with tidal water boundary) and low water mark. for file in gml.namelist(): if file.startswith('data/'): if 'TidalBoundary' in fiona.listlayers('zip://' + filename + '!' + file): gdf = gpd.read_file('zip://' + filename + '!' + file, layer='TidalBoundary') gdf.to_postgis("openmap_tidalboundary_200908", engine, if_exists='append') else: print(file, 'does not contain TidalBoundary layer.') # ## OpenRivers for product in json: if product['id'] == 'OpenRivers': url = product['url'] rivers = requests.get(url) rivers_json = rivers.json() rivers_json download = requests.get(rivers_json['downloadsUrl']) download_json = download.json() download_json for fileformat in download_json: if fileformat['format'] == 'GeoPackage': with open(fileformat['fileName'], "wb") as down: down.write(requests.get(fileformat['url']).content) down.close() gpkg = zipfile.ZipFile(fileformat['fileName']) gpkg.namelist() fiona.listlayers('zip://' + fileformat['fileName'] + '!' + gpkg.namelist()[1]) gdf = gpd.read_file('zip://' + fileformat['fileName'] + '!' + gpkg.namelist()[1], layer='WatercourseLink') gdf.to_postgis("openrivers_200909", engine, if_exists='replace') # ## Save samples of OS MasterMap Building height layer to parquet files # # Processing raw downloads of OS MasterMap Building height layer from `digimap.edina.ac.uk` to a single parquet file per city. Initial downloaded zip files have been extracted and renamed to indicate the location. The contents of each folder is unchanged. # # The structure of current directory: # # ``` # notebook.ipynb # glasgow/* # leeds/* # edinburgh/* # manchester/* # liverpool/* # ``` # # where `*` is the original content of downloaded zip file. 
# # First we get the list of cities based on folders: # cities = [city for city in glob.glob('**') if not city.endswith('ipynb')] # The data are stored in tiled `gdb` files which needs to be merged together first. We loop through `cities` and concatenate all `gdb` files in each into a single GeoDataFrame, which is then saved to a single parquet file. for city in cities: files = glob.glob(f'{city}/**/**/**') merged = pd.concat([gpd.read_file(file) for file in files if file.endswith('gdb')]) merged.to_parquet(f'{city}.pq') # ## Coastline # # To get the external boundary of the GB, we will also need coastline geometry. That is downloadable from [data.gov.uk](https://data.gov.uk/dataset/26053db7-6caf-446f-8f7e-9775a19970e0/countries-december-2017-full-extent-boundaries-in-great-britain). coastline = gpd.read_file('http://geoportal1-ons.opendata.arcgis.com/datasets/f2c2211ff185418484566b2b7a5e1300_1.zip?outSR={%22latestWkid%22:27700,%22wkid%22:27700}') coastline coastline.plot() coastline.to_postgis("gb_countries_2017", engine, if_exists='replace') # However, that proved to be unprecise. Coastline layer from Strategi product (although deprecated now) is better for our purpose. json = response.json() for product in json: if product['id'] == 'Strategi': url = product['url'] strategi = requests.get(url) strategi_json = strategi.json() strategi_json download = requests.get(strategi_json['downloadsUrl']) download_json = download.json() download_json for fileformat in download_json: if fileformat['format'] == 'ESRI® Shapefile': with open(fileformat['fileName'], "wb") as down: down.write(requests.get(fileformat['url']).content) down.close() gml = zipfile.ZipFile('strtgi_essh_gb.zip') gml.namelist() coastline = gpd.read_file('zip://strtgi_essh_gb.zip!strtgi_essh_gb/data/coastline.shp') coastline.to_postgis("gb_coastline_2016", engine, if_exists='replace')
vector_data/Fetch_OS_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1 align="center">Registration Settings: Choices, Choices, Choices</h1> # # The performance of most registration algorithms is dependent on a large number of parameter settings. For optimal performance you will need to customize your settings, turning all the knobs to their "optimal" position:<br> # <img src="knobs.jpg" style="width:700px"/> # <font size="1"> [This image was originally posted to Flickr and downloaded from wikimedia commons https://commons.wikimedia.org/wiki/File:TASCAM_M-520_knobs.jpg]</font> # # This notebook illustrates the use of reference data (a.k.a "gold" standard) to empirically tune a registration framework for specific usage. This is dependent on the characteristics of your images (anatomy, modality, image's physical spacing...) and on the clinical needs. # # Also keep in mind that the defintion of optimal settings does not necessarily correspond to those that provide the most accurate results. # # The optimal settings are task specific and should provide: # <ul> # <li>Sufficient accuracy in the Region Of Interest (ROI).</li> # <li>Complete the computation in the alloted time.</li> # </ul> # # We will be using the training data from the Retrospective Image Registration Evaluation (<a href="http://www.insight-journal.org/rire/">RIRE</a>) project. # + import SimpleITK as sitk # Utility method that either downloads data from the network or # if already downloaded returns the file name for reading from disk (cached data). # %run update_path_to_download_script from downloaddata import fetch_data as fdata # Always write output to a separate directory, we don't want to pollute the source directory. 
OUTPUT_DIR = 'Output' import registration_callbacks as rc import registration_utilities as ru # %matplotlib inline # - # ### Read the RIRE data and generate a larger point set as a reference # + fixed_image = sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32) moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32) fixed_fiducial_points, moving_fiducial_points = ru.load_RIRE_ground_truth(fdata("ct_T1.standard")) # Estimate the reference_transform defined by the RIRE fiducials and check that the FRE makes sense (low) R, t = ru.absolute_orientation_m(fixed_fiducial_points, moving_fiducial_points) reference_transform = sitk.Euler3DTransform() reference_transform.SetMatrix(R.flatten()) reference_transform.SetTranslation(t) reference_errors_mean, reference_errors_std, _, reference_errors_max,_ = ru.registration_errors(reference_transform, fixed_fiducial_points, moving_fiducial_points) print('Reference data errors (FRE) in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(reference_errors_mean, reference_errors_std, reference_errors_max)) # Generate a reference dataset from the reference transformation # (corresponding points in the fixed and moving images). fixed_points = ru.generate_random_pointset(image=fixed_image, num_points=100) moving_points = [reference_transform.TransformPoint(p) for p in fixed_points] # Compute the TRE prior to registration. pre_errors_mean, pre_errors_std, pre_errors_min, pre_errors_max, _ = ru.registration_errors(sitk.Euler3DTransform(), fixed_points, moving_points, display_errors = True) print('Before registration, errors (TRE) in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(pre_errors_mean, pre_errors_std, pre_errors_max)) # - # ### Initial Alignment # # We use the CenteredTransformInitializer. Should we use the GEOMETRY based version or the MOMENTS based one? 
# + initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()), moving_image, sitk.Euler3DTransform(), sitk.CenteredTransformInitializerFilter.GEOMETRY) initial_errors_mean, initial_errors_std, initial_errors_min, initial_errors_max, _ = ru.registration_errors(initial_transform, fixed_points, moving_points, min_err=pre_errors_min, max_err=pre_errors_max, display_errors=True) print('After initialization, errors (TRE) in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(initial_errors_mean, initial_errors_std, initial_errors_max)) # - # ## Registration # # Possible choices for simple rigid multi-modality registration framework (<b>300</b> component combinations, in addition to parameter settings for each of the components): # <ul> # <li>Similarity metric, 2 options (Mattes MI, JointHistogram MI): # <ul> # <li>Number of histogram bins.</li> # <li>Sampling strategy, 3 options (NONE, REGULAR, RANDOM)</li> # <li>Sampling percentage.</li> # </ul> # </li> # <li>Interpolator, 10 options (sitkNearestNeighbor, sitkLinear, sitkGaussian, sitkBSpline,...)</li> # <li>Optimizer, 5 options (GradientDescent, GradientDescentLineSearch, RegularStepGradientDescent...): # <ul> # <li>Number of iterations.</li> # <li>learning rate (step size along parameter space traversal direction).</li> # </ul> # </li> # </ul> # # In this example we will plot the similarity metric's value and more importantly the TREs for our reference data. A good choice for the former should be reflected by the later. That is, the TREs should go down as the similarity measure value goes down (not necessarily at the same rates). # # Finally, we are also interested in timing our registration. 
Ipython allows us to do this with minimal effort using the <a href="http://ipython.org/ipython-doc/stable/interactive/magics.html?highlight=timeit#magic-timeit">timeit</a> cell magic (Ipython has a set of predefined functions that use a command line syntax, and are referred to as magic functions). # + # #%%timeit -r1 -n1 # to time this cell uncomment the line above #the arguments to the timeit magic specify that this cell should only be run once. running it multiple #times to get performance statistics is also possible, but takes time. if you want to analyze the accuracy #results from multiple runs you will have to modify the code to save them instead of just printing them out. registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkNearestNeighbor) #2. Replace with sitkLinear registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #1. 
Increase to 1000 registration_method.SetOptimizerScalesFromPhysicalShift() # Don't optimize in-place, we would like to run this cell multiple times registration_method.SetInitialTransform(initial_transform, inPlace=False) # Add callbacks which will display the similarity measure value and the reference data during the registration process registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_and_reference_start_plot) registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_and_reference_end_plot) registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_and_reference_plot_values(registration_method, fixed_points, moving_points)) final_transform_single_scale = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)) print('Final metric value: {0}'.format(registration_method.GetMetricValue())) print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription())) final_errors_mean, final_errors_std, _, final_errors_max,_ = ru.registration_errors(final_transform_single_scale, fixed_points, moving_points, min_err=initial_errors_min, max_err=initial_errors_max, display_errors=True) print('After registration, errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(final_errors_mean, final_errors_std, final_errors_max)) # - # In some cases visual comparison of the registration errors using the same scale is not informative, as seen above [all points are grey/black]. We therefor set the color scale to the min-max error range found in the current data and not the range from the previous stage. 
final_errors_mean, final_errors_std, _, final_errors_max,_ = ru.registration_errors(final_transform_single_scale, fixed_points, moving_points, display_errors=True) # ### Now using the built in multi-resolution framework # # Perform registration using the same settings as above, but take advantage of the multi-resolution framework which provides a significant speedup with minimal effort (3 lines of code). # # It should be noted that when using this framework the similarity metric value will not necessarily decrease between resolutions, we are only ensured that it decreases per resolution. This is not an issue, as we are actually observing the values of a different function at each resolution. # # The example below shows that registration is improving even though the similarity value increases when changing resolution levels. # + # #%%timeit -r1 -n1 #the arguments to the timeit magic specify that this cell should only be run once. running it multiple #times to get performance statistics is also possible, but takes time. if you want to analyze the accuracy #results from multiple runs you will have to modify the code to save them instead of just printing them out. registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.1) registration_method.SetInterpolator(sitk.sitkLinear) #2. 
Replace with sitkLinear registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) registration_method.SetOptimizerScalesFromPhysicalShift() # Don't optimize in-place, we would like to run this cell multiple times registration_method.SetInitialTransform(initial_transform, inPlace=False) # Add callbacks which will display the similarity measure value and the reference data during the registration process registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_and_reference_start_plot) registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_and_reference_end_plot) registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_and_reference_plot_values(registration_method, fixed_points, moving_points)) registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1]) registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0]) registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() final_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)) print('Final metric value: {0}'.format(registration_method.GetMetricValue())) print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription())) final_errors_mean, final_errors_std, _, final_errors_max,_ = ru.registration_errors(final_transform, fixed_points, moving_points, True) print('After registration, errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(final_errors_mean, final_errors_std, final_errors_max)) # - # ### Sufficient accuracy <u>inside</u> the ROI # # Up to this point our accuracy evaluation has ignored the content of the image and is likely overly conservative. We have been looking at the registration errors inside the volume, but not necessarily in the smaller ROI. 
# # To see the difference you will have to <b>comment out the timeit magic in the code above</b>, run it again, and then run the following cell. # + # Threshold the original fixed, CT, image at 0HU (water), resulting in a binary labeled [0,1] image. roi = fixed_image> 0 # Our ROI consists of all voxels with a value of 1, now get the bounding box surrounding the head. label_shape_analysis = sitk.LabelShapeStatisticsImageFilter() label_shape_analysis.SetBackgroundValue(0) label_shape_analysis.Execute(roi) bounding_box = label_shape_analysis.GetBoundingBox(1) # Bounding box in physical space. sub_image_min = fixed_image.TransformIndexToPhysicalPoint((bounding_box[0],bounding_box[1], bounding_box[2])) sub_image_max = fixed_image.TransformIndexToPhysicalPoint((bounding_box[0]+bounding_box[3]-1, bounding_box[1]+bounding_box[4]-1, bounding_box[2]+bounding_box[5]-1)) # Only look at the points inside our bounding box. sub_fixed_points = [] sub_moving_points = [] for fixed_pnt, moving_pnt in zip(fixed_points, moving_points): if sub_image_min[0]<=fixed_pnt[0]<=sub_image_max[0] and \ sub_image_min[1]<=fixed_pnt[1]<=sub_image_max[1] and \ sub_image_min[2]<=fixed_pnt[2]<=sub_image_max[2] : sub_fixed_points.append(fixed_pnt) sub_moving_points.append(moving_pnt) final_errors_mean, final_errors_std, _, final_errors_max,_ = ru.registration_errors(final_transform, sub_fixed_points, sub_moving_points, True) print('After registration, errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(final_errors_mean, final_errors_std, final_errors_max)) # -
Python/62_Registration_Tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ed-chin-git/DS-Unit-2-Sprint-3-Advanced-Regression/blob/master/module3-quantile-regression/LS_DS1_233_Quantile_Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="WcUKgwzXgV-b" colab_type="text" # Lecture [video](https://www.youtube.com/watch?v=7lJWEMMSfoA&feature=youtu.be) # # Getting Started with [Quantile Regression](https://data.library.virginia.edu/getting-started-with-quantile-regression/) # # # # + [markdown] id="SV7gaADiicnV" colab_type="text" # # Lambda School Data Science - Quantile Regression # # Regressing towards the median - or any quantile - as a way to mitigate outliers and control risk. 
# + [markdown] id="6klMj4q3iqMh" colab_type="text" # ## Lecture # # Let's look at data that has a bit of a skew to it: # # http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data # + id="yw1AD_z9O0xL" colab_type="code" colab={} import pandas as pd df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/' '00381/PRSA_data_2010.1.1-2014.12.31.csv') # + id="RTlH1lJ8PDv5" colab_type="code" outputId="d7c8c81c-9d68-4809-f91b-a879b0337867" colab={"base_uri": "https://localhost:8080/", "height": 206} df.head() # + id="m-yC9OSPPFo8" colab_type="code" outputId="dea06251-bdc0-44c9-d6b5-3c9efd9ad19f" colab={"base_uri": "https://localhost:8080/", "height": 320} df.describe() # + id="hfV3WisFP_O6" colab_type="code" outputId="f4182145-f7db-46cd-8f2a-507293d220b1" colab={"base_uri": "https://localhost:8080/", "height": 347} df['pm2.5'].plot.hist(); # + id="u2DTml6zvd_5" colab_type="code" outputId="6304b93d-2273-4bdd-ed85-7866c6ccbbc7" colab={"base_uri": "https://localhost:8080/", "height": 1053} import numpy as np df['pm2.5'].dropna().apply(np.log).plot.hist(); # + id="OgbMTAHzQJB8" colab_type="code" outputId="332c8801-2d19-459b-f952-5c8ce247e010" colab={"base_uri": "https://localhost:8080/", "height": 34} # How does linear regression handle it? from sklearn.linear_model import LinearRegression # Let's drop NAs and limit to numeric values df = df._get_numeric_data().dropna() X = df.drop('pm2.5', axis='columns') y = df['pm2.5'] linear_reg = LinearRegression().fit(X, y) linear_reg.score(X, y) # + id="1Tv8r_xSxMwZ" colab_type="code" outputId="4a7dcc1d-0188-4d1f-9438-73bab479d76f" colab={"base_uri": "https://localhost:8080/", "height": 34} ','.join(['1', '2', '3']) # import csv # + id="-viFFtm0RizM" colab_type="code" outputId="d0ff81a8-7937-48d3-92a0-d09bb277d52b" colab={"base_uri": "https://localhost:8080/", "height": 462} # Not bad - but what if we wanted to model the distribution more conservatively? 
# Let's try quantile import statsmodels.formula.api as smf # Different jargon/API in StatsModel documentation # "endogenous" response var is dependent (y), it is "inside" # "exogenous" variables are independent (X), it is "outside" # Bonus points - talk about "exogenous shocks" and you're a bona fide economist # ~ style formulas look like what R uses # y ~ x1 + x2 + ... # They can also support * for interaction terms and polynomials # y ~ x1 + (x1 * x1) + x2 + (x1 * x2) # Also, these formulas break with . in variable name, so lets change that df = df.rename(index=str, columns={'pm2.5': 'pm25'}) # Now let's construct the formula string using all columns quant_formula = 'pm25 ~ ' + ' + '.join(df.drop('pm25', axis='columns').columns) print(quant_formula) quant_mod = smf.quantreg(quant_formula, data=df) quant_reg = quant_mod.fit(q=.5) quant_reg.summary() # "summary" is another very R-thing # + [markdown] id="ZBkP4bewd-HT" colab_type="text" # That fit to the median (q=0.5), also called "Least Absolute Deviation." The pseudo-R^2 isn't really directly comparable to the R^2 from linear regression, but it clearly isn't dramatically improved. Can we make it better? # + id="BgvYeHg3bL4g" colab_type="code" outputId="bf4547a0-7739-45d8-bf5a-26ab1684f7f6" colab={"base_uri": "https://localhost:8080/", "height": 593} help(quant_mod.fit) # + [markdown] id="_UdyTFEAy4Mr" colab_type="text" # Pretend R code # # ``` # ols1 <- lm(y ~ x1 + x2 + (x1 * x1)) # summary(ols1) # ``` # + id="lpNPioZTei4U" colab_type="code" outputId="0c8ad055-6b0e-4eb6-acc9-815746221509" colab={"base_uri": "https://localhost:8080/", "height": 1424} quantiles = (.05, .96, .1) for quantile in quantiles: print(quant_mod.fit(q=quantile).summary()) # + [markdown] id="Xqh4Jp1XgjrE" colab_type="text" # "Strong multicollinearity", eh? In other words - maybe we shouldn't throw every variable in our formula. 
Let's hand-craft a smaller one, picking the features with the largest magnitude t-statistics for their coefficients. Let's also search for more quantile cutoffs to see what's most effective. # + id="NmoELnXwgpXd" colab_type="code" outputId="408e106d-3ab2-4918-f9f8-8a0b00fcad54" colab={"base_uri": "https://localhost:8080/", "height": 975} quant_formula = 'pm25 ~ DEWP + TEMP + Ir + hour + Iws' quant_mod = smf.quantreg(quant_formula, data=df) for quantile in range(50, 100): quantile /= 100 quant_reg = quant_mod.fit(q=quantile) print((quantile, quant_reg.prsquared)) # + id="Bz0GmE5kuwQY" colab_type="code" outputId="d0205ec3-ea15-4976-e5a8-a7c5e479ba42" colab={"base_uri": "https://localhost:8080/", "height": 316} # Okay, this data seems *extremely* skewed # Let's trying logging import numpy as np df['pm25'] = np.log(1 + df['pm25']) quant_mod = smf.quantreg(quant_formula, data=df) quant_reg = quant_mod.fit(q=.25) quant_reg.summary() # "summary" is another very R-thing # + [markdown] id="8kXcxnNBgizX" colab_type="text" # Overall - in this case, quantile regression is not *necessarily* superior to linear regression. But it does give us extra flexibility and another thing to tune - what the center of what we're actually fitting in the dependent variable. # # The basic case of `q=0.5` (the median) minimizes the absolute value of residuals, while OLS minimizes the squared value. By selecting `q=0.25`, we're targeting a lower quantile and are effectively saying that we only want to over-estimate at most 25% of the time - we're being *risk averse*. # # Depending on the data you're looking at, and the cost of making a false positive versus a false negative, this sort of flexibility can be extremely useful. # # Live - let's consider another dataset! 
Specifically, "SkillCraft" (data on competitive StarCraft players): http://archive.ics.uci.edu/ml/datasets/SkillCraft1+Master+Table+Dataset # + id="ofvwSAZUhWDw" colab_type="code" outputId="be48df45-cac6-4820-fec1-f902c0d498a5" colab={"base_uri": "https://localhost:8080/", "height": 226} # TODO Live! # Hint - we may only care about the *top* quantiles here # Another hint - there are missing values, but Pandas won't see them right away import pandas as pd df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/' '00272/SkillCraft1_Dataset.csv') df.head() # + id="xWXVRHL18imt" colab_type="code" outputId="a2336160-4e13-416b-8d58-dbdae6410148" colab={"base_uri": "https://localhost:8080/", "height": 34} df.shape # + id="dU-LofZo8leg" colab_type="code" outputId="9c58c393-175f-4f42-c570-0e93b5691e1a" colab={"base_uri": "https://localhost:8080/", "height": 382} df.isna().sum() # + id="IOCXPx1i8ppS" colab_type="code" outputId="786687d2-339c-45eb-9b69-909cea1bb344" colab={"base_uri": "https://localhost:8080/", "height": 382} import numpy as np df = df.replace('?', np.nan) df.isna().sum() # + id="hJcVhajW9voD" colab_type="code" colab={} # Quinn's cleaning code hasna = ['Age', 'HoursPerWeek', 'TotalHours'] for feat in hasna: df[feat] = pd.to_numeric(df[feat]) #df[hasna].head() # + id="6rZVRcbW8y43" colab_type="code" outputId="d6c4e536-09d3-4c64-c6b5-d81d41533303" colab={"base_uri": "https://localhost:8080/", "height": 320} df.describe() # + id="X3ZUVDBG9L89" colab_type="code" outputId="8b03630e-7338-481f-db8a-4a5a796281fb" colab={"base_uri": "https://localhost:8080/", "height": 382} df.dtypes # + id="c8HPGuB49WYU" colab_type="code" outputId="2ec05665-0ae3-44cc-cd56-7ea2850b7cf9" colab={"base_uri": "https://localhost:8080/", "height": 34} # How does linear regression handle it? 
Courtesy of Daniel from sklearn.linear_model import LinearRegression # Let's drop NAs and limit to numeric values df = df._get_numeric_data().dropna() X = df.drop('APM', axis='columns') y = df['APM'] linear_reg = LinearRegression().fit(X, y) linear_reg.score(X, y) # + id="q_AWFmKB_hOW" colab_type="code" outputId="5a64194c-197f-41b0-ee13-45e3993cbd1a" colab={"base_uri": "https://localhost:8080/", "height": 347} for name, coef in zip(X.columns, linear_reg.coef_): print(name, coef) # + id="7dlWiKh5AMvV" colab_type="code" outputId="22276b9f-0775-4729-d400-d14d8abeb787" colab={"base_uri": "https://localhost:8080/", "height": 156} df.LeagueIndex.value_counts() # + id="2sZkjxfl9qA-" colab_type="code" outputId="6d3823a9-7fd3-4755-e9b9-bf85d86b5417" colab={"base_uri": "https://localhost:8080/", "height": 670} # Let's say we want to answer "What are the fastest 10% of SC players like?" # That means quantile regression with q=0.9 # First we need a formula, with our friend twiddle quant_formula = 'APM ~ ' + ' + '.join(df.drop('APM', axis='columns').columns) # From Daniel quant_mod = smf.quantreg(quant_formula, data=df) quant_reg = quant_mod.fit(q=.9) quant_reg.summary() # + id="_gXrjTOtFkjJ" colab_type="code" outputId="2d076942-08bc-4847-8cd0-1e19a461f34a" colab={"base_uri": "https://localhost:8080/", "height": 208} quant_reg.predict(X)[:10] # + id="QEcHSHzeFvx8" colab_type="code" outputId="4518356c-e80e-40f0-9205-ba669bb12a69" colab={"base_uri": "https://localhost:8080/", "height": 69} linear_reg.predict(X)[:10] # + id="yupp6rY3DHko" colab_type="code" outputId="2344fa70-bb5a-4966-b5bc-04c316334eef" colab={"base_uri": "https://localhost:8080/", "height": 1094} df.TotalHours.value_counts() # + [markdown] id="o2BADEQUirXa" colab_type="text" # ## Assignment - birth weight data # # Birth weight is a situation where, while the data itself is actually fairly normal and symmetric, our main goal is actually *not* to model mean weight (via OLS), but rather to identify mothers at risk 
of having children below a certain "at-risk" threshold weight. # # Quantile regression gives us just the tool we need. For the data we are using, see: http://people.reed.edu/~jones/141/BirthWgt.html # # bwt: baby's weight in ounces at birth # gestation: duration of pregnancy in days # parity: parity indicator (first born = 1, later birth = 0) # age: mother's age in years # height: mother's height in inches # weight: mother's weight in pounds (during pregnancy) # smoke: indicator for whether mother smokes (1=yes, 0=no) # # Use this data and `statsmodels` to fit a quantile regression, predicting `bwt` (birth weight) as a function of the other covariates. First, identify an appropriate `q` (quantile) to target a cutoff of 90 ounces - babies above that birth weight are generally healthy/safe, babies below are at-risk. # # Then, fit and iterate your model. Be creative! You may want to engineer features. Hint - mother's age likely is not simply linear in its impact, and the other features may interact as well. # # At the end, create at least *2* tables and *1* visualization to summarize your best model. Then (in writing) answer the following questions: # # - What characteristics of a mother indicate the highest likelihood of an at-risk (low weight) baby? # - What can expectant mothers be told to help mitigate this risk? # # Note that second question is not exactly a data science question - and that's okay! You're not expected to be a medical expert, but it is a good exercise to do a little bit of digging into a particular domain and offer informal but informed opinions. # + [markdown] id="Lias_WgthTTt" colab_type="text" # ###_Use this data and `statsmodels` to fit a quantile regression, predicting `bwt` (birth weight) as a function of the other covariates. 
First, identify an appropriate `q` (quantile) to target a cutoff of 90 ounces - babies above that birth weight are generally healthy/safe, babies below are at-risk._ # # Is this really the correct way to think about quantile regression? From what I'm reading, it seems like the application of quantile regression for this problem is: # 1. Select 'q' based on some confidence measure (e.g. i only want to overestimate a baby's weight 5% of the time) # 2. Estimate quantile regression using parameter # 3. Make predictions using new data with the assumption I will only overestimate baby weight 5% of the time. Any babies whose *predicted* weight is below 90 oz, take necessary corrective actions # # 'q' really doesnt have much to do with our 'cutoff' value, it has much more to do with our desired confidence in predictive output # # **<NAME>** [2:56 PM] # True that "cutoff" is perhaps a harsh way to see q - "target" may be better. The general idea is you're fitting a linear model like OLS, but instead of optimizing for predictions that minimize error relative to mean (expected value), you want to minimize error relative to some quantile ("target", "cutoff", etc.) 
# # The result is yes, a model that should avoid overestimating babies based on q, and that's definitely a good way to explain it
#
#
#
# * <NAME>
# * <NAME>-
# * <NAME>
#

# + id="HUWKv16FjZsY" colab_type="code" outputId="797af29d-28bd-4d70-879e-0e2627e1086a" colab={"base_uri": "https://localhost:8080/", "height": 221}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import statsmodels.formula.api as smf

# Birth-weight dataset: bwt (ounces), gestation, parity, age, height, weight, smoke.
bwt_df = pd.read_csv('http://people.reed.edu/~jones/141/Bwt.dat')
print(bwt_df.shape)
bwt_df.head()

# + id="G5uMYdDPuVuy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="20f5357b-6c8e-4cf1-8877-d393fcc0f58f"
bwt_df.isna().sum()

# + id="dy5FkUZpkJT_" colab_type="code" outputId="501a728f-6d77-4135-e3cd-f665bcfab0bb" colab={"base_uri": "https://localhost:8080/", "height": 297}
bwt_df.describe()

# + id="iamNG4VNvIDI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="cf347d79-bb6e-4bb7-85dc-ef8837584c16"
bwt_df.bwt.hist()

# + id="liO453ZovMUk" colab_type="code" colab={}
# FIX: the original line was `newborn = newborn._get_numeric_data().dropna()`,
# but `newborn` is never defined anywhere in this notebook -- the dataframe
# loaded above is `bwt_df`.
bwt_df = bwt_df._get_numeric_data().dropna()

# q = fraction of babies in this sample at or below the 90-ounce at-risk cutoff.
q = stats.percentileofscore(bwt_df.bwt, 90) / 100

# Regress bwt on every other column: 'bwt ~ gestation + parity + ...'
qt_formula = 'bwt ~ ' + ' + '.join(bwt_df.drop(['bwt'], axis='columns').columns)
qt_formula

qt_mod = smf.quantreg(qt_formula, data=bwt_df)
# FIX: the original cell ended with the incomplete statement `quant_reg=`
# (a syntax error); fit the model at the cutoff quantile computed above.
quant_reg = qt_mod.fit(q=q)

# + [markdown] id="LjCeoCnPm8iG" colab_type="text"
# ###

# + [markdown] id="XY9JGAnJisdB" colab_type="text"
# ## Resources and stretch goals

# + [markdown] id="inFWXSpqmND5" colab_type="text"
# Resources:
# - [statsmodels QuantReg example](http://www.statsmodels.org/dev/examples/notebooks/generated/quantile_regression.html)
# - [How Shopify used Quantile Regression in modeling risk](https://medium.com/data-shopify/how-shopify-capital-uses-quantile-regression-to-help-merchants-succeed-10ee1b36b17d)
#
# Stretch goals:
# - Find a dataset where you think quantile regression may be appropriate, and try both it and linear regression -
#   compare/contrast their strengths/weaknesses, and write a summary for which you think is better for the situation and why
# - Check out [deep quantile regression](https://www.kdnuggets.com/2018/07/deep-quantile-regression.html), an approach that uses a custom quantile loss function and Keras to train a quantile model
module3-quantile-regression/LS_DS1_233_Quantile_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pyvizenv] * # language: python # name: conda-env-pyvizenv-py # --- import numpy as np import pandas as pd from pathlib import Path # %matplotlib inline import statsmodels.api as sm from statsmodels.tsa.arima_model import ARMA import warnings warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA') from statsmodels.tsa.arima_model import ARIMA from arch import arch_model # # Return Forecasting: Read Historical Daily Yen Futures Data # In this notebook, you will load historical Dollar-Yen exchange rate futures data and apply time series analysis and modeling to determine whether there is any predictable behavior. # Futures contract on the Yen-dollar exchange rate: # This is the continuous chain of the futures contracts that are 1 month to expiration yen_futures = pd.read_csv( Path("yen.csv"), index_col="Date", infer_datetime_format=True, parse_dates=True ) yen_futures.head() # Trim the dataset to begin on January 1st, 1990 yen_futures = yen_futures.loc["1990-01-01":, :] yen_futures.head() # # Return Forecasting: Initial Time-Series Plotting # Start by plotting the "Settle" price. Do you see any patterns, long-term and/or short? # Plot just the "Settle" column from the dataframe: yen_futures.Settle.plot(figsize=(15, 10)) # --- # # Decomposition Using a Hodrick-Prescott Filter # Using a Hodrick-Prescott Filter, decompose the Settle price into a trend and noise. 
# + import statsmodels.api as sm # Apply the Hodrick-Prescott Filter by decomposing the "Settle" price into two separate series: yen_noise, yen_trend = sm.tsa.filters.hpfilter(yen_futures['Settle']) yen_noise.head() # + # Create a dataframe of just the settle price, and add columns for "noise" and "trend" series from above: yen_df = yen_futures['Settle'].to_frame() yen_df['yen_noise'] = yen_noise.to_frame() yen_df['yen_trend'] = yen_trend.to_frame() yen_df.head() # - # Plot the Settle Price vs. the Trend for 2015 to the present yen_df = yen_df.loc["2015":] yen_df.plot(y=['Settle','yen_trend'], figsize=(10,5)) # Plot the Settle Noise yen_df.plot(y=['Settle','yen_noise'], figsize=(10,5)) # --- # # Forecasting Returns using an ARMA Model # Using futures Settle *Returns*, estimate an ARMA model # # 1. ARMA: Create an ARMA model and fit it to the returns data. Note: Set the AR and MA ("p" and "q") parameters to p=2 and q=1: order=(2, 1). # 2. Output the ARMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)? # 3. 
Plot the 5-day forecast of the forecasted returns (the results forecast from ARMA model) # Create a series using "Settle" price percentage returns, drop any nan"s, and check the results: # (Make sure to multiply the pct_change() results by 100) # In this case, you may have to replace inf, -inf values with np.nan"s returns = (yen_futures[["Settle"]].pct_change() * 100) returns = returns.replace(-np.inf, np.nan).dropna() returns.tail() # + # Estimate and ARMA model using statsmodels (use order=(2, 1)) model = ARMA(returns.values, order=(2,1)) # Fit the model and assign it to a variable called results results = model.fit() # + # Output model summary results: pd.DataFrame(results.forecast(steps=10)[0]).plot(title="yen Return Forecast") last_price = yen_df.Settle.iloc[-1] forecast_10 = results.forecast(steps=10)[0] + 1 * last_price print(f'the returns for the next 10 days are:{forecast_10}') # - # Plot the 5 Day Returns Forecast pd.DataFrame(results.forecast(steps=5)[0]).plot(title="yen Return Forecast") results.summary() # --- # # Forecasting the Settle Price using an ARIMA Model # 1. Using the *raw* Yen **Settle Price**, estimate an ARIMA model. # 1. Set P=5, D=1, and Q=1 in the model (e.g., ARIMA(df, order=(5,1,1)) # 2. P= # of Auto-Regressive Lags, D= # of Differences (this is usually =1), Q= # of Moving Average Lags # 2. Output the ARIMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)? # 3. Construct a 5 day forecast for the Settle Price. What does the model forecast will happen to the Japanese Yen in the near term? 
# +
# Estimate and ARIMA Model:
# Hint: ARIMA(df, order=(p, d, q))
model = ARIMA(yen_futures.Settle, order=(5, 1, 1))

# Fit the model
results2 = model.fit()
# -

# Output model summary results:
results2.summary()

# Plot the 5 Day Price Forecast
pd.DataFrame(results2.forecast(steps=5)[0]).plot(title="yen Return Forecast")

results2

# # ---
# # Volatility Forecasting with GARCH
#
# Rather than predicting returns, let's forecast near-term **volatility** of Japanese Yen futures returns. Being able to accurately predict volatility will be extremely useful if we want to trade in derivatives or quantify our maximum loss.
#
# Using futures Settle *Returns*, estimate an GARCH model
#
# 1. GARCH: Create an GARCH model and fit it to the returns data. Note: Set the parameters to p=2 and q=1: order=(2, 1).
# 2. Output the GARCH summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?
# 3. Plot the 5-day forecast of the volatility.

# +
# Estimate a GARCH model:
arch_df = (yen_futures[["Settle"]].pct_change() * 100).dropna()
model = arch_model(arch_df, mean="Zero", vol="GARCH", p=2, q=1)

# Fit the model
# FIX: the original fitted twice in a row (disp="in" then disp="on"),
# discarding the first result; arch's `disp` accepts "off"/"final", so
# fit once with per-iteration output silenced.
res2 = model.fit(disp="off")
# -

# Summarize the model results
res2.summary()

# Find the last day of the dataset
last_day2 = returns.index.max().strftime('%Y-%m-%d')
last_day2

# Create a 5 day forecast of volatility
forecast_horizon = 5
# Start the forecast using the last_day calculated above
forecasts = res2.forecast(start=last_day2, horizon=forecast_horizon)
forecasts

# Annualize the forecast (252 trading days per year)
intermediate = np.sqrt(forecasts.variance.dropna() * 252)
intermediate

# Transpose the forecast so that it is easier to plot
final2 = intermediate.dropna().T
final2.head()

# Plot the final forecast (final2 was already computed above)
final2.plot()
# # Based on the model evaluation, would you feel confident in using these models for trading?
#
# * Analyze the yen to decide whether to make a purchase.
# I would make a purchase: the forecast shows growth over the next five periods.
# * Analyze the risk of the yen.
# Volatility is increasing over the next 5 days, as the plot shows, so it would be a good time to buy for a long position.
# * Analyze the confidence of models as a basis for trading.
# The confidence in this model is positive, and I think it could be used for trading in a long-term position.
#
#
#
Starter_Code/time_series_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <NAME>, 17PH20006 import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score import seaborn as sns import random f=66 random.seed(f) np.random.seed(f) # + columns=["Class","Alcohol","Malic acid","Ash","Alcalinity of ash","Magnesium"\ ,"Total phenols","Flavanoids","Nonflavanoid phenols","Proanthocyanins","Color intensity","Hue"\ ,"OD280/OD315 of diluted wines","Proline"] df=pd.read_csv("assignment2/wine.data", names=columns) # - df.head() df.describe() X=df.drop("Class",axis=1).values display(X) Y=df.Class.values display(Y) print("X shape = ",X.shape) print("Y shape = ",Y.shape) X_train,X_test,Y_train,y_test = train_test_split(X,Y,test_size=0.2,random_state=42, stratify=Y) #random_state:control the shuffling,stratify:data is split in a stratified fashion,using this as the class labels # # Observing accuracy for different values of k # + #import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier #Setting up arrays to store training and test accuracies neighbors = np.arange(1,15) # K=1,2....15 train_accuracy =np.empty(len(neighbors)) #Return a new array of size 15 ,containing accuracy for k=1,..15 test_accuracy = np.empty(len(neighbors)) for i,k in enumerate(neighbors): #Setup a knn classifier with k neighbors knn = KNeighborsClassifier(n_neighbors=k) #Fit the model knn.fit(X_train, Y_train) #Compute accuracy on the training set train_accuracy[i] = knn.score(X_train, Y_train) #Compute accuracy on the test set test_accuracy[i] = knn.score(X_test, y_test) # - #Generate plot 
plt.title('k-NN Varying number of neighbors')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training accuracy')
plt.legend()
plt.xlabel('Number of neighbors')
plt.ylabel('Accuracy')
plt.show()

#Setup a knn classifier with k neighbors as 9 gives a maxima in testing accuracy
knn = KNeighborsClassifier(n_neighbors=9)

# # Training the Model

# Fit the model
knn.fit(X_train, Y_train)

# # Accuracy

#Get accuracy. Note: In case of classification algorithms score method represents accuracy.
print(knn.score(X_test, y_test))
print("Accuracy is :",str(100*knn.score(X_test,y_test))+"%")

# # Confusion Matrix, Precision,Recall,F1-score

#import confusion_matrix
from sklearn.metrics import confusion_matrix
#let predict for X_test using the classifier we had fit above
y_pred = knn.predict(X_test)
confusion_matrix(y_test, y_pred)
#1 points which are actually belongs to class 1,are predicted as class 1

# # Classification report

#import classification_report
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))

# # Condensed KNN

# +
# Condensed Nearest Neighbour: keep a subset Z of the training data by
# absorbing each randomly drawn sample whose nearest prototype in Z carries
# a different class label.
#
# Append the class label as the last column so samples and labels travel together.
X_1 = np.hstack((X_train, Y_train.reshape(len(Y_train), 1)))
n_total = X_1.shape[0]
label_col = X_1.shape[1] - 1          # index of the appended class-label column

# Seed the condensed set Z with the first sample and remove it from the pool.
Z = X_1[0, :].reshape(1, -1)
X_1 = np.delete(X_1, 0, axis=0)

ct = 0                                # number of draws that absorbed nothing
while True:
    # FIX: the original could call randint(0, 0) once the pool emptied.
    if X_1.shape[0] == 0:
        break
    absorbed = False
    i = np.random.randint(0, X_1.shape[0])

    # FIX: the original (a) measured the reference distance from X_1[0,:]
    # instead of the sampled row X_1[i,:], (b) included the class-label
    # column in the Euclidean distance, and (c) compared feature column 0
    # rather than the label column when deciding whether to absorb.
    # Find the nearest prototype over the 13 features only, then compare labels.
    dists = np.sqrt(np.sum((Z[:, :label_col] - X_1[i, :label_col]) ** 2, axis=1))
    nearest = np.argmin(dists)
    if int(X_1[i, label_col]) != int(Z[nearest, label_col]):
        Z = np.vstack((Z, X_1[i, :]))
        absorbed = True
    X_1 = np.delete(X_1, i, axis=0)   # sample without replacement

    if not absorbed:
        ct += 1
        if ct == 20:                  # stop after 20 draws with no absorption
            break

print("Print the condensed set is "+str(100*(Z.shape[0]/n_total))+"% of the main dataset")
# -

# +
#The training using condensed set
x_train = Z[:, :label_col]
y_train = Z[:, label_col]

#Setup a knn classifier with k neighbors as 9 gives a maxima in testing accuracy
knn = KNeighborsClassifier(n_neighbors=9)

#Fit the model
knn.fit(x_train, y_train)

#Get accuracy.
print("Accuracy = "+str(100*knn.score(X_test,y_test))+" %")

#let predict for X_test using the classifier we had fit above
y_pred = knn.predict(X_test)
print(classification_report(y_test, y_pred))
17PH20006/KNN using condensed set.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # GIT PYTHON

# +
import os
import sys

# Download and extract a portable git
git_dir = r"C:\Program Files\Git\cmd"
git_bin = os.path.join(git_dir, "git")

# FIX: the original used os.putenv(...) and os.environ.putenv(...), which set
# the raw process environment WITHOUT updating the os.environ mapping that
# GitPython reads (and _Environ.putenv is not part of os.environ's public API
# on modern Pythons). Assigning through os.environ updates both.
os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = git_bin

# Attempt with VonC's Answer, making sure that it is first in PATH
# NOTE(review): prepending an executable directory to sys.path has no effect
# on module imports -- kept only because the quoted workaround included it.
sys.path = [git_dir] + sys.path
os.environ["PATH"] = os.pathsep.join([git_dir]) + os.pathsep + os.environ["PATH"]

# Only import git now, because that's when the path is checked!
import git
.ipynb_checkpoints/github_python_interface-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Collect per-file Novor de-novo sequencing results, normalize the peptide
# strings, map them to scan ids, and score them against Kaiko target sequences.
import pandas as pd
import numpy as np
import glob

# Lookup table mapping each .mgf file name to its numeric dataset id.
mgf_tab = pd.read_csv('/Volumes/MSSHARE/Joonyong/mgf_list_v3.log', sep='\t')
mgf_tab[mgf_tab.id>234]

result_files = glob.glob("*_Novor.csv")

# NOTE(review): the next four lines are leftover scratch — `f` is undefined at
# this point in a fresh kernel (it is the loop variable of the loop below), so
# running top-to-bottom raises NameError. Kept for fidelity; consider removing.
fname = f.rsplit('_', 1)[0]
fileid = mgf_tab[mgf_tab.mgf_file==fname].id.tolist()[0]
temp = pd.read_csv(f, skiprows=20, skipinitialspace=True)[["# id","peptide"]]
temp['scan'] = str(fileid) + ":" + temp['# id'].apply(str)

# +
# Load every "*_Novor.csv" result file, tag each row with its dataset id, and
# concatenate into one frame. The 20 skipped rows are the Novor CSV header.
df_list = []
for f in result_files:
    fname = f.rsplit('_', 1)[0]
    fileid = mgf_tab[mgf_tab.mgf_file==fname].id.tolist()[0]
    temp = pd.read_csv(f, skiprows=20, skipinitialspace=True)[["# id","peptide"]]
    # temp['scan'] = str(fileid) + ":" + temp['# id'].apply(str)
    temp['fileid'] = fileid
    df_list.append(temp)
    print(df_list[-1].shape)
df = pd.concat(df_list, ignore_index=True)
print(df.shape)
# -

df.head()

# Normalize Novor's "(O)" oxidation notation to the "M(+15.99)" form parse() expects.
df['pepseq'] = df.peptide.str.replace("\(O\)", "M(+15.99)")


def parse(raw_sequence):
    """Convert a Novor peptide string into a comma-joined residue list.

    Modified residues "C(+57.02)", "M(+15.99)", "N(+.98)" and "Q(+.98)" are
    collapsed into the single tokens Cmod/Mmod/Nmod/Qmod used by mass_AA below.
    Returns "" for non-string input (e.g. NaN); an unrecognized "(...)"
    modification stops parsing and returns the residues seen so far.
    """
    try:
        raw_sequence_len = len(raw_sequence)
    # len() raises TypeError for NaN/float rows; anything else should surface.
    except TypeError:
        return ""
    peptide = []
    index = 0
    while index < raw_sequence_len:
        if raw_sequence[index] == "(":
            if peptide[-1] == "C" and raw_sequence[index:index+8] == "(+57.02)":
                peptide[-1] = "Cmod"
                index += 8
            elif peptide[-1] == 'M' and raw_sequence[index:index+8] == "(+15.99)":
                peptide[-1] = 'Mmod'
                index += 8
            elif peptide[-1] == 'N' and raw_sequence[index:index+6] == "(+.98)":
                peptide[-1] = 'Nmod'
                index += 6
            elif peptide[-1] == 'Q' and raw_sequence[index:index+6] == "(+.98)":
                peptide[-1] = 'Qmod'
                index += 6
            else:
                # unknown modification: abandon the rest of the sequence
                unknown_modification = True
                break
        else:
            peptide.append(raw_sequence[index])
            index += 1
    return ",".join(peptide)


df['output_seq'] = df.pepseq.apply(parse)

# Map (file id, MGF order) -> scan number.
mgforder2scan = pd.read_csv('mgfId2ScanId.txt', sep='\t')
print(mgforder2scan.shape)
mgforder2scan.head()
mgforder2scan_df = mgforder2scan.merge(mgf_tab[["id","mgf_file"]],
                                       left_on="MGFDataset", right_on="mgf_file",
                                       how="inner")
print(mgforder2scan_df.shape)
mgforder2scan_df.head(10)


def findScanId(row):
    """Return the scan number for one result row via (MGF order, file id) lookup."""
    return mgforder2scan_df[(mgforder2scan_df.MGForder == row['# id'])
                            & (mgforder2scan_df.id == row.fileid)].ScanNum.tolist()[0]


df['scanid'] = df.apply(findScanId, axis=1)
df[['fileid','output_seq','scanid']].to_csv('novor_result_all.txt', sep='\t', index=False)

# # read novor results from the csv file

df = pd.read_csv('novor_result_all.txt', sep='\t')
df['scan'] = df['fileid'].apply(str).str.cat(df['scanid'].apply(str), sep=":")
df.head()

# FIX: the original line was `df = df[['fileid','output_seq','scanid']].` — a
# stray trailing dot (SyntaxError), and the subset also dropped the 'scan'
# column that the merge immediately below keys on. Keep 'scan' so the merge works.
df = df[['fileid','output_seq','scanid','scan']]

kaiko_result = pd.read_pickle('/Volumes/MSSHARE/Joonyong/PnnlRun3_235_v4_0001_ep_60/mgf_test/all_results_df.pkl')
kaiko_result_df = kaiko_result[kaiko_result.file_id>234]
df = df.merge(kaiko_result_df[['scan', 'target_seq', 'len_AA']],
              left_on="scan", right_on="scan", how="right")
df.head()

# +
# Monoisotopic residue masses (Da) used to align predicted and target sequences.
mass_H = 1.0078
mass_H2O = 18.0106
mass_NH3 = 17.0265
mass_N_terminus = 1.0078
mass_C_terminus = 17.0027
mass_CO = 27.9949
mass_AA = {'_PAD': 0.0,
           '_GO': mass_N_terminus-mass_H,
           '_EOS': mass_C_terminus+mass_H,
           'A': 71.03711, # 0
           'R': 156.10111, # 1
           'N': 114.04293, # 2
           'Nmod': 115.02695,
           'D': 115.02694, # 3
           'C': 103.00919, # 4
           'Cmod': 160.03065, # C(+57.02)
           #~ 'Cmod': 161.01919, # C(+58.01) # orbi
           'E': 129.04259, # 5
           'Q': 128.05858, # 6
           'Qmod': 129.0426,
           'G': 57.02146, # 7
           'H': 137.05891, # 8
           'I': 113.08406, # 9
           'L': 113.08406, # 10
           'K': 128.09496, # 11
           'M': 131.04049, # 12
           'Mmod': 147.0354,
           'F': 147.06841, # 13
           'P': 97.05276, # 14
           'S': 87.03203, # 15
           'T': 101.04768, # 16
           'W': 186.07931, # 17
           'Y': 163.06333, # 18
           'V': 99.06841, # 19
          }
# print(mass_AA)


def get_longest_match_with_novor(output_seq, target_seq, len_AA):
    """Return (start index, length) of the longest run of mass-matched residues.

    Both sequences are walked by cumulative residue mass (tolerance 0.5 Da on
    the prefix sum, 0.1 Da per residue), marking matched positions, then the
    longest consecutive matched run is located. Returns (0, 0) when output_seq
    is NaN or empty.
    """
    # if exact_match > 0:
    #     return (0, len_AA)
    if (type(output_seq) == float) | (output_seq == ""):
        return (0, 0)
    output = output_seq.split(',')
    decoder_input = target_seq.split(',')
    decoder_input_len = len(decoder_input)
    output_len = len(output)
    decoder_input_mass = [mass_AA[x] for x in decoder_input]
    decoder_input_mass_cum = np.cumsum(decoder_input_mass)
    output_mass = [mass_AA[x] for x in output]
    output_mass_cum = np.cumsum(output_mass)
    length = min(decoder_input_len, output_len)
    num_match = 0
    i = 0
    j = 0
    match_list = [0 for i in range(decoder_input_len)]
    # Two-pointer walk over the cumulative mass ladders.
    while i < decoder_input_len and j < output_len:
        if abs(decoder_input_mass_cum[i] - output_mass_cum[j]) < 0.5:
            if abs(decoder_input_mass[i] - output_mass[j]) < 0.1:
                num_match += 1
                match_list[i] = 1
            i += 1
            j += 1
        elif decoder_input_mass_cum[i] < output_mass_cum[j]:
            i += 1
        else:
            j += 1
    # print('ou tput_seq:', output)
    # print('target_seq:', decoder_input)
    # print(match_list)
    # Convert the 0/1 match mask into run lengths (suffix sums of each run).
    count_list = match_list
    for i in range(length-1):
        if count_list[length - i - 2] == 0:
            count_list[length - i - 2] = 0
        else:
            count_list[length - i - 2] += count_list[length - i - 1]
    # print(count_list)
    max_idx = np.argmax(count_list)
    return (max_idx, count_list[max_idx])


# test
print("5. Finding longest subsequences of matching with Novor scoring method")
idx = 2
print(get_longest_match_with_novor(df.output_seq[idx], df.target_seq[idx], df.len_AA[idx]))

df[['longest_match_idx_with_novor', 'longest_match_length_with_novor']] = df.apply(
    lambda row: pd.Series(get_longest_match_with_novor(row['output_seq'],
                                                       row['target_seq'],
                                                       row['len_AA']),
                          dtype='int32'),
    axis=1)
# -

# A prediction is an exact match when the longest run spans the whole target.
df['exact_match'] = (df.longest_match_length_with_novor==df.len_AA).astype(int)
df.head()
df.exact_match.mean()
df.to_pickle('all_results_from_novor.pkl')

# # read pickle

df = pd.read_pickle('all_results_from_novor.pkl')
print(df.exact_match.mean())
print(df.shape)
print(df.target_seq.drop_duplicates().shape)
print(df.target_seq.drop_duplicates().shape)
analysis/for_novor/novor_results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# !pip install pybaseball

# Pull Statcast pitch-by-pitch data season by season, clean it with
# Season_cleaner, and pickle each cleaned season to disk.
import pandas as pd
import numpy as np
from pybaseball import statcast
import random
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)


class Season_cleaner:
    """ Cleans a season dataframe """

    def __init__(self, dataframe):
        # Raw Statcast pitch-level dataframe to be cleaned in place.
        self.df = dataframe

        # Features to drop (deprecated/unused Statcast columns)
        self.drop_columns = [
            'spin_dir',
            'spin_rate_deprecated',
            'break_angle_deprecated',
            'break_length_deprecated',
            'game_type',
            'tfs_deprecated',
            'tfs_zulu_deprecated',
            'umpire'
        ]

        # List of unique pitcher ID's
        self.pitchers = self.df['pitcher'].unique().tolist()

    def drop_features(self):
        """ Drops deprecated features """
        self.df = self.df.drop(columns = self.drop_columns)

    def drop_instances(self):
        """ Drops useless instances (rows where every column is NaN) """
        self.df = self.df.dropna(axis = 0, how = 'all')

    def fielding_alignment_typecast(self):
        """ Forces the object type onto the fielding alignment columns """
        self.df['if_fielding_alignment'] = self.df['if_fielding_alignment'].astype(object)
        self.df['of_fielding_alignment'] = self.df['of_fielding_alignment'].astype(object)

    def chronological_sort(self):
        """ Sort pitches chronologically """
        self.df = self.df.sort_values(by = [
            'game_date',
            'game_pk',
            'at_bat_number',
            'pitch_number'
        ])

    def pitch_type(self):
        """
        Feature Name: pitch_type
        Feature Description: The type of pitch derived from Statcast.

        Issue: Feature is supposed to contain a 2 character string, but many
        values (265) are filled with long strings of numerical characters.
        Example: 160421_181540
        Solution: Replace values longer than 2 characters in length with np.NaN
        """
        self.df['pitch_type'] = self.df.apply(
            lambda row: np.NaN\
            if len(str(row['pitch_type'])) > 2\
            else row['pitch_type'], axis = 1)

        """
        Issue: Many values of this feature are recorded as 'UN'
        Solution: Replace value with np.NaN
        """
        self.df['pitch_type'] = self.df['pitch_type'].replace({'UN':np.nan})

        """
        Issue**: The pitch type feature is filled with NaN values
        Solution: We will create a mapping of a pitchers id and his normalized
        pitch counts. Using these normalized values as weights we will select a
        random pitch type and fill the NaN value for that pitcher. We will use
        df.apply, but this could be time optomized by using series vectorization.
        """
        # Populate mapping: pitcher id -> {pitch type: prior probability}
        pitcher_dict = {}
        for pitcher in self.pitchers:
            # Pitcher's prior pitch type probabilites
            pitch_type_weights = self.df[self.df.pitcher == pitcher]\
                .pitch_type\
                .value_counts(normalize=True)
            pitcher_dict[pitcher] = pitch_type_weights.to_dict()

        # Fill nan values (pitch types a pitcher never threw get weight 0)
        pitcher_dict = pd.DataFrame(pitcher_dict).fillna(0).to_dict()

        # Select replacement pitch type and fill NaN values
        def pick_a_pitch(pitcher_id):
            """
            Returns a random pitch type label
            Uses pitchers prior pitch type probabilites as weights
            """
            population = list(pitcher_dict[pitcher_id].keys())
            weights = list(pitcher_dict[pitcher_id].values())
            return random.choices(population, weights, k=1)[0]

        # Iterate by instance, fill null values
        self.df['pitch_type'] = self.df.apply(
            lambda row: pick_a_pitch(row['pitcher']) \
            if pd.isnull(row['pitch_type']) \
            else row['pitch_type'], axis = 1)

    def pitch_subtype(self):
        """ Creates a pitch_subtype feature.

        The raw 2-letter code is preserved as pitch_subtype and pitch_type is
        collapsed to the coarse category (fastball/breaking/offspeed/pitchout).
        """
        pitch_type_map = {'FA':'fastball',
                          'FF':'fastball',
                          'FT':'fastball',
                          'FC':'fastball',
                          'FS':'fastball',
                          'SI':'fastball',
                          'SF':'fastball',
                          'SL':'breaking',
                          'CB':'breaking',
                          'CU':'breaking',
                          'SC':'breaking',
                          'KC':'breaking',
                          'CH':'offspeed',
                          'KN':'offspeed',
                          'EP':'offspeed',
                          'FO':'breaking',
                          'PO':'pitchout',
                          'IN':'pitchout'}
        self.df['pitch_subtype'] = self.df['pitch_type']
        self.df['pitch_type'] = self.df['pitch_type'].map(pitch_type_map)

    def count_status(self):
        """
        Feature: count_status
        Description: The ratio of balls and strikes for the current at bat

        Issue: There are two existing features related to the count. We need to
        represent the count as a categorical feature.
        Solution: Classify the pitcher's position regarding the count
        (Ahead, Behind, Neutral)
        """
        # Cap data-entry anomalies: a count can never have more than 3 balls.
        self.df['balls'] = self.df['balls'].replace({4:3, 5:3})

        # Encode the count as a two-digit "balls strikes" string, e.g. "32".
        self.df['count_status'] = self.df['balls'].astype('int').astype('str')\
            + self.df['strikes'].astype('int').astype('str')

        count_status_mapping = {
            '00':'neutral',
            '21':'neutral',
            '32':'neutral',
            '10':'behind',
            '20':'behind',
            '30':'behind',
            '31':'behind',
            '01':'ahead',
            '02':'ahead',
            '11':'ahead',
            '12':'ahead',
            '22':'ahead'
        }
        self.df['count_status'] = self.df['count_status'].map(count_status_mapping)

    def score_differential(self):
        """
        Feature: Score Differential
        Description: The absolute value of the difference in home team score
        and away team score
        """
        self.df['score_differential'] = abs(self.df['home_score'] - self.df['away_score'])

    def bases_loaded(self):
        """
        Feature**: Bases Loaded
        Description: A binary indication of the bases being loaded or not

        NOTE(review): this overwrites on_1b/on_2b/on_3b in place (runner id ->
        1/0 occupancy flags); any later use of the original runner ids is lost.
        """
        self.df['on_1b'] = self.df['on_1b'] * 0 + 1
        self.df['on_1b'] = self.df['on_1b'].fillna(0)
        self.df['on_2b'] = self.df['on_2b'] * 0 + 1
        self.df['on_2b'] = self.df['on_2b'].fillna(0)
        self.df['on_3b'] = self.df['on_3b'] * 0 + 1
        self.df['on_3b'] = self.df['on_3b'].fillna(0)
        self.df['bases_loaded'] = self.df['on_1b'] + self.df['on_2b'] + self.df['on_3b']
        self.df['bases_loaded'] = self.df['bases_loaded'].apply(lambda x: 1 if x == 3 else 0)

    def batter_swung(self):
        """
        Feature: swung
        Description: Binary feature describing whether or not the batter swung
        at the pitch
        """
        swung = ['foul','hit_into_play','swinging_strike','hit_into_play_no_out',
                 'hit_into_play_score','foul_tip','swinging_strike_blocked',
                 'foul_bunt','missed_bunt']
        self.df['batter_swung'] = self.df['description'].apply(lambda x: 1 if x in swung else 0)

    def ball_position(self):
        """ Creates a feature describing where the pitch crosses the
        strikezone plane.

        The +/-0.73 ft bounds are the horizontal edges of the plate zone.
        """
        self.df['ball_high'] = self.df['plate_z'] > self.df['sz_top']
        self.df['ball_low'] = self.df['plate_z'] < self.df['sz_bot']
        self.df['ball_left'] = self.df['plate_x'].apply(lambda x: x < -0.73)
        self.df['ball_right'] = self.df['plate_x'].apply(lambda x: x > 0.73)

    def in_strikezone(self):
        """ Binary feature representing whether or not the pitch was in the
        strikezone (1 only when none of the four out-of-zone flags is set) """
        self.df['in_strikezone'] = (self.df['ball_high'].astype(int) +
                                    self.df['ball_low'].astype(int) +
                                    self.df['ball_left'].astype(int) +
                                    self.df['ball_right'].astype(int))
        self.df['in_strikezone'] = self.df['in_strikezone'].apply(
            lambda x: 0 if x > 0 else 1)

    def chased(self):
        """ Binary feature representing whether or not the batter chased the
        pitch (swung at a pitch outside the zone) """
        self.df['chased'] = self.df['batter_swung'] - self.df['in_strikezone']
        self.df['chased'] = self.df['chased'].apply(lambda x: 1 if x == 1 else 0)

    def clean(self):
        # Run the full cleaning pipeline in dependency order; several later
        # steps read columns produced by earlier ones (e.g. chased needs
        # batter_swung and in_strikezone).
        print('Dropping features...')
        self.drop_features()
        print('Done!')
        print('Dropping instances...')
        self.drop_instances()
        print('Done!')
        print('Typecasting...')
        self.fielding_alignment_typecast()
        print('Done!')
        print('Sorting pitches...')
        self.chronological_sort()
        print('Done!')
        print('Cleaning pitch type...')
        self.pitch_type()
        print('Done!')
        print('Creating pitch subtype...')
        self.pitch_subtype()
        print('Done!')
        print('Creating count status...')
        self.count_status()
        print('Done!')
        print('Creating score differential...')
        self.score_differential()
        print('Done!')
        print('Creating bases loaded...')
        self.bases_loaded()
        print('Done!')
        print('Creating batter swung...')
        self.batter_swung()
        print('Done!')
        print('Creating ball position...')
        self.ball_position()
        print('Done!')
        print('Creating strikezone...')
        self.in_strikezone()
        print('Done!')
        print('Creating chased...')
        self.chased()
        print('Done!')
        return self.df


# Season date ranges to pull; all but 2010 are currently disabled.
seasons = {'2010':{'start_date': '2010-04-04', 'end_date': '2010-04-10'},
           #'2011':{'start_date': '2011-03-31', 'end_date': '2011-10-28'},
           #'2012':{'start_date': '2012-03-28', 'end_date': '2012-10-28'},
           #'2013':{'start_date': '2013-03-31', 'end_date': '2013-10-30'},
           #'2014':{'start_date': '2014-03-22', 'end_date': '2014-10-29'},
           #'2015':{'start_date': '2015-04-05', 'end_date': '2015-11-01'},
           #'2016':{'start_date': '2016-04-03', 'end_date': '2016-11-02'},
           #'2017':{'start_date': '2017-04-02', 'end_date': '2017-11-01'},
           #'2018':{'start_date': '2018-03-29', 'end_date': '2018-10-28'},
           #'2019':{'start_date': '2019-03-20', 'end_date': '2019-09-07'}
          }


def pull_statcast_data(start_date, end_date, year):
    """ Date Format: YYYY-MM-DD

    NOTE(review): the `year` parameter is unused here — kept for interface
    symmetry with the other helpers.
    """
    df = statcast(start_dt = start_date, end_dt = end_date)
    return df


def compress_and_export(df, year, f_path = "season_pickles/"):
    """ Pickle DataFrame to <f_path><year>.pkl with zip compression """
    df.to_pickle(path=(f_path + year + ".pkl" ),compression='zip')


def pull_clean_and_pickle(start_date, end_date, year):
    """ Queries statcast, calls cleaning function, pickles season dataframes,
    and writes to seasons directory """
    df = pull_statcast_data(start_date, end_date, year)
    df = Season_cleaner(df).clean()
    compress_and_export(df, year)


# +
# Manual single-season run (duplicates the loop below for interactive use).
year = '2010'
start_date = seasons[year]['start_date']
end_date = seasons[year]['end_date']
df = pull_statcast_data(start_date, end_date, year)
# -

season = Season_cleaner(df)
df = season.clean()

# Batch run over every enabled season.
for year in seasons.keys():
    start_date = seasons[year]['start_date']
    end_date = seasons[year]['end_date']
    pull_clean_and_pickle(start_date, end_date, year)
Work in progress/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Light Curve Plot and Curve Fit

# %matplotlib notebook

# +
# Special import to allow PGF (for LaTeX) output while still showing
# interactive plots
# https://timodenk.com/blog/exporting-matplotlib-plots-to-latex/
# https://matplotlib.org/tutorials/text/pgf.html
import matplotlib
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)

# https://stackoverflow.com/questions/2537868/sans-serif-math-with-latex-in-matplotlib
matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'sans-serif',
    "font.sans-serif": ["Raleway"],
    'text.usetex': True,
    'pgf.rcfonts': False,
    'text.latex.preamble': [
        r'\usepackage{color}',
        r'\usepackage{cmbright}',
        r'\usepackage{amsmath}'
    ],
    'pgf.preamble': [
        r'\usepackage{color}',
        r'\usepackage{cmbright}',
        r'\usepackage{amsmath}'
    ]
})

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from astropy.timeseries import TimeSeries, BinnedTimeSeries, aggregate_downsample
from astropy import units as u
from astropy.time import Time
from datetime import datetime
from dateutil import tz
from lmfit import Model
from lmfit.models import GaussianModel, ConstantModel
import csv

# Links:
# https://docs.astropy.org/en/stable/api/astropy.time.TimePlotDate.html

# Read the photometry: Julian date, apparent magnitude, and magnitude error
# columns exported by the measurement tool.
x = []
y = []
y_error = []
with open('Measurements.csv') as f:
    reader = csv.DictReader(f, delimiter=',')
    for row in reader:
        x.append(float(row['JD_UTC']))
        y.append(float(row['Source_AMag_T1']))
        y_error.append(float(row['Source_AMag_Err_T1']))

# Convert our dates
x_datetime = Time(x, format='jd', scale='utc')

# +
ts = TimeSeries(time=x_datetime)

# Add data
ts['mag'] = y * u.mag
ts['err'] = y_error
#print(ts)

# Convert to binned time series with 1 minute bin size.
# nanmedian makes each bin robust to outliers and NaNs.
bts = aggregate_downsample(ts, time_bin_size=1*u.min, aggregate_func=np.nanmedian)
#print(bts)

# +
xval = np.array(bts.time_bin_start.jd)
yval = np.array(bts['mag'])
err = np.array(bts['err'])

# Fit the eclipse dip with a Gaussian peak on a constant baseline.
# From: https://lmfit.github.io/lmfit-py/builtin_models.html
# and: https://stackoverflow.com/questions/44573896/python-fit-gaussian-to-noisy-data-with-lmfit
g_peak = GaussianModel()
g_offset = ConstantModel()
g_model = g_peak + g_offset
# Baseline guess: brightest (smallest) binned magnitude.
g_pars = g_offset.make_params(c=np.nanmin(yval))
g_pars += g_peak.guess(yval, x=xval, center=np.median(xval), amplitude=.125, sigma=.035)
g_result = g_model.fit(yval, g_pars, x=xval, nan_policy='propagate')
#print(g_result.fit_report())

plt.figure(figsize=(7,4))
#plt.yticks(fontname = "DejaVu Sans")

# Plot Gaussian fit
plt.plot(xval, g_result.best_fit, 'b--', linewidth=1, alpha=.35, label='Gaussian Fit')

# Plot data
plt.errorbar(xval, yval, yerr=err, marker=".", linestyle='',
             elinewidth=0.5, capsize=2, capthick=0.5, color='black',
             label='RZ Cass Mag')
plt.xlabel('JD UTC')
plt.ylabel('Magnitude')
ax = plt.gca()

# Indicate first camera relocation
ax.axvspan(2458935.75,2458935.77, alpha=.3, color='yellow', label='Relocate Camera')
# Indicate cloud cover time period
ax.axvspan(2458935.775,2458935.835, alpha=.3, color='grey', label='Cloud cover')
# Indicate second camera relocation
ax.axvspan(2458935.869,2458935.88, alpha=.3, color='green', label='Relocate Camera')

# Set up secondary x axis for local time (MDT)
ax2 = ax.twiny()
formatter = DateFormatter("%m/%d\n%H:%M",tz=tz.gettz('MDT'))
# alpha=0 hides the points; this plot exists only to drive the date axis.
ax2.plot_date(x_datetime.plot_date,y,xdate=True,alpha=0)
ax2.xaxis.set_major_formatter(formatter)
ax2.xaxis.set_tick_params(rotation=0, labelsize=7)
ax2.set_xlabel('Local Time (MDT)')

ax.grid(which='major', axis='both', linestyle="--", linewidth=0.5, color='.75')
#plt.legend(prop={'size':50})
#ax.legend(loc='upper left', borderpad=2)
ax.legend(frameon=True, loc='lower left', borderpad=1, prop={'size':7})
# Magnitude axis: brighter is numerically smaller, so invert.
ax.invert_yaxis()
plt.show()

# NOTE(review): calling savefig after plt.show() can write an empty figure
# when run as a plain script (show() may close the figure); works here only
# because of the %matplotlib notebook backend — confirm before reusing.
plt.savefig("RZCas_light_curve.pdf", bbox_inches='tight')

## Some alternate output formats, uncomment to generate
#plt.savefig("RZCas_light_curve.png", bbox_inches='tight', dpi=600)
# PGF format for use with LaTeX
#plt.savefig("RZCas_light_curve.pgf", bbox_inches='tight')
# Output PGFplots code for LaTeX
#tikzplotlib.clean_figure() # simplify and cleanup figure
#tikzplotlib.save("RZCas_light_curve.tex")
# -
observing_project/poster/figs/light_curve/light_curve.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Query NCBI PubMed via Biopython's Entrez utilities, fetch paper metadata,
# and summarize Kenyan open-science publications with pandas.

# import a module just a .py file
from Bio import Entrez
from numba import jit

# email you use from NCBI
Entrez.email = "<EMAIL>"

# make a placeholder to store result from querying
handle = Entrez.esearch(db = "pubmed", term="[Open science] AND Kenya")

# make another placeholder which queries NCBI to get the NCBI
# IDs of interest
record = Entrez.read(handle)

# The result is a dictionary that contains various values
# Try running this line of code without the bracket ["IdList"]
# %save record["IdList"] > NCBIids.txt

# Great! We now have **PubMed IDs** that could contain Id's that have papers
# related to term *[Open science] AND Kenya.* This is the same way you'd
# request for information at pubmed in NCBI. Let go ahead and get the
# information the full paper if possible in the next step.

# We just need to change our handle to get for instance a summary of
# the data the papers we need
handle2 = Entrez.esummary(db="pubmed", id = "30123385")

# +
# Let's bring our result back from NCBI
record2 = Entrez.read(handle2)
# see what we are capable of subsetting
record2
# -

print("Extract interesting entries in the data")
print("")
print(record2[0]['Id'])
print("")
print(record2[0]['Title'])
print("")
print(record2[0]['AuthorList'])
print("")
print(record2[0]['FullJournalName'])
print("")
print(record2[0]['EPubDate'])

# We can't extract everything we want so let's just get the full paper
# if we can
# ?Entrez.efetch

# +
# As Caleb said this will give us XML output which we continue to parse
# Notice, the documentation of each argument need more annotation
handle4 = Entrez.efetch(db="pubmed", id = "30123385", rettype="gb",retmode="text")
# fetching the result from the database
# print(handle4.read())

# +
# Checking what type of object is return XML maybe?
# remove the delete the hash tag to see for yourself
# type(handle4.read())
# -

# Using store magic command to store the output to a file
# Storing in variables for later is not allowed
# NOTE(review): handle4.read() exhausts the handle — a second read() returns
# an empty string, so order of these cells matters.
# %store handle4.read() >> file.txt

# you can use shell commands directly on the cell of jupyter
# %cat file.txt

# We have to write code 20 times! Nope there's an efficient way to solve this
# problem, at least partially. It's .... you guessed it write a function. Go
# back to the code we wrote, put everything together, change just a few
# things and we are golden right? RIGHT?

# write a function to automate rewriting the code over and over again
# This is a hack function; you could use a loop to feed into the other using a
# generator that could traverse the list generated and give you the result —
# try and fix this function
def paper_retriever(email, searchterm, pubmedid):
    '''The paper retriever function takes your email which uses the same name
    email as an argument, pubmedid you can get this from the previous function,
    searchterm take the NCBI type of query as a string and renamefile just
    changing your file names to avoid confusion.
    Return the full paper depending on if it's open access or not.
    '''
    # Enter your own email
    Entrez.email = email
    # search NCBI from the particular search term with method esearch
    handle = Entrez.esearch(db="pubmed", term=searchterm)
    # get the results
    record = Entrez.read(handle)
    # the method efetch does and fetches the information you need brings it
    # back to your Ipython session
    handle2 = Entrez.efetch(db="pubmed", id = pubmedid, rettype="gb",retmode="text")
    # seeing the results
    # print("Extract interesting entries in the data")
    # print("")
    # print(record[0]['Id'])
    # print("")
    # print(record[0]['Title'])
    # print("")
    # print(record[0]['AuthorList'])
    # print("")
    # print(record[0]['FullJournalName'])
    # print("")
    # print(record[0]['EPubDate'])
    # using cell magic in a function in the jupyter notebook
    return handle2.read()

# calling the function as a test
print (paper_retriever(email="<EMAIL>", searchterm="[Open science] AND Kenya",pubmedid=30123385))

paper = paper_retriever(email="<EMAIL>", searchterm="[Open science] AND Kenya",pubmedid=30123385)
paper
# %store paper >> file2.txt
# %cat file2.txt

# +
# %%writefile paper_retriever.py
from Bio import Entrez

def paper_retriever(email, searchterm, pubmedid):
    '''The paper retriever function takes your email which uses the same name
    email as an argument, pubmedid you can get this from the previous function,
    searchterm take the NCBI type of query as a string.
    Return the full paper depending on if it's open access or not.
    '''
    # Enter your own email
    Entrez.email = email
    # search NCBI from the particular search term with method esearch
    handle = Entrez.esearch(db="pubmed", term=searchterm)
    # get the results
    record = Entrez.read(handle)
    # the method efetch does and fetches the information you need brings it
    # back to your Ipython session
    handle2 = Entrez.efetch(db="pubmed", id = pubmedid, rettype="gb",retmode="text")
    # using cell magic in a function in the jupyter notebook
    return handle2.read()

paper1 = paper_retriever(email="<EMAIL>", searchterm="[Open science] AND Kenya",pubmedid=30123385)
# #%store paper >> papers1.txt
print(paper1)

# +
# NOTE(review): `identity` is passed as BOTH the esearch term and the esummary
# id — presumably the search term was meant to use `term`; verify.
from Bio import Entrez

def paper_parser(term, identity):
    handle = Entrez.esearch(db="pubmed", term=identity)
    record = Entrez.read(handle)
    print (record)
    handle2 = Entrez.esummary(db="pubmed", id = identity)
    record2 = Entrez.read(handle2)
    print("Extract interesting entries in the data")
    print("")
    print(record2[0]['Id'])
    print("")
    print(record2[0]['Title'])
    print("")
    print(record2[0]['AuthorList'])
    print("")
    print(record2[0]['FullJournalName'])
    print("")
    print(record2[0]['EPubDate'])
    return record

print(paper_parser(term="[Open science] AND Kenya", identity=30123385))

# +
# %%writefile paper_parser.py
from Bio import Entrez

def paper_parser(term, identity):
    Entrez.email = "<EMAIL>" #use your email
    handle = Entrez.esearch(db="pubmed", term=identity)
    record = Entrez.read(handle)
    print (record)
    handle2 = Entrez.esummary(db="pubmed", id = identity)
    record2 = Entrez.read(handle2)
    print("Extract interesting entries in the data")
    print("")
    print(record2[0]['Id'])
    print("")
    print(record2[0]['Title'])
    print("")
    print(record2[0]['AuthorList'])
    print("")
    print(record2[0]['FullJournalName'])
    print("")
    print(record2[0]['EPubDate'])
    return record2

paper2 = paper_parser(term="[Open science] AND Kenya", identity=30123385)
# #%store paper2 >> papers2.txt run in ipython session
print(paper2)
# -

# %run ../Code/paper_retriever.py
# %run ../Code/paper_parser.py

# stores the interesting NCBI results
# %store record["IdList"] >> NCBIids.txt

# >group publications by date
# import the data
# do some cleaning that is, parsing datetime objects
# select the necessary columns
# groupby date and publications columns

# %cat kenyan_papers_details.txt

# import data manipulation library
import pandas as pd

# importing data using the read_csv function delimiter set to tabs
df = pd.read_csv('../Data/kenyan_papers_details.txt', delimiter="\t")

# .head() allows you to see the first observations
df.head()
# see the column names of the dataframe
df.columns
# gives a concise summary of the dataframe
df.info()
# dimensions of the dataframe (rows, columns)
df.shape
# Columns of interest include: EpubDate
df.PubDate[1]

# +
# df['EPubDate'] = df['EPubDate']

# + cell_style="split"
# counting the number of times a publication was
# posted for this sample 2018 Jan 18 was common
df.EPubDate.value_counts()[:10]

# + cell_style="split"
# a lot of publications were posted in 2018 for this
# sample
df.PubDate.value_counts()[:10]
# -

# answers the question what are the popular journals researchers post their work
# This could coincide with the type of research that's done mostly namely the malaria journal
df_group_pubs = df.groupby(by=["FullJournalName"]).count().sort_values(by="Source", ascending=False)
df_group_pubs

# grouping by publication date and fulljournalname, papers were posted recently in PloS one
# to get all the results i recommend subsetting the column and writing to a file
df_group_pubs2 = df.groupby(by=["PubDate","FullJournalName"]).count().sort_values(by="Source", ascending=False)
df_group_pubs2

# counting the number of occurrences of particular journals as confirmation that our function works
# wellcome open research has 27 papers
df_group_pubs.EPubDate.sort_values(ascending=False)
# same thing but taking publication date into consideration
df_group_pubs.PubDate.sort_values(ascending=False)

df_group_pubs2.sort_values(by="FullJournalName",ascending=False)

# group pubs by journals
# import the data
# do some cleaning that is, parsing datetime objects
# select the necessary columns
# groupby date and publications columns

df["FullJournalName"].value_counts()

def summary (filename):
    '''The function summary takes a file and outputs important summary
    statistics namely:
    * The number of records
    * The most popular times of the year a journal is published
    * group publications by date
    * group publication by Journals
    Returns tables with the above information.'''
    df = pd.read_csv(filename, delimiter="\t")
    msg = "The number of publications from the sample gotten from NCBI (rows,columns) {}"
    # NOTE(review): the formatted message is built but never printed — the
    # result of msg.format is discarded; presumably print(msg.format(...)) was meant.
    msg.format(df.shape);
    print("=" * 100)
    print ("Common electronic publication dates" + str(df.EPubDate.value_counts()[:10]))
    print ("Common Publication dates" + str(df.PubDate.value_counts()[:10]))
    print("=" * 100)
    print("Grouping publications by date and journal")
    df_group_pubs2 = df.groupby(by=["PubDate","FullJournalName"]).count().sort_values(by="Source", ascending=False)
    print(df_group_pubs2)
    print("="*100)
    print("Sorting the publications found by journals")
    # NOTE(review): df_group_pubs here is the module-level global computed
    # above, not derived from `filename` — confirm this is intentional.
    print(df_group_pubs.PubDate.sort_values(ascending=False))
    return "Done"

print (summary('../Data/kenyan_papers_details.txt'))

# +
# taking the df column with author list to determine the most frequent author in the 2000 paper sample
df.AuthorList.value_counts()[1:100]
# Found 4 occurrences of the following authors
# '<NAME>', '<NAME>', '<NAME>', 'Akutse KS', 'Foba CN', '<NAME>', '<NAME>'
# '<NAME>', '<NAME>'

# +
# commands used find the common and least common author names our sample
# # %cut -f2 kenyan_papers_details.txt | head -10
# # %cut -f2 kenyan_papers_details.txt > authorList2000papers.txt
# -

# # common names in publications
#
# |name| occurence of name|
# | ------------- |-----:|
# |Van| 114|
# |Wang |60|
# |Otieno| 52|
# |Zhang |49|
# |Bukusi| 46|
# |Cheng|45|

# Above are the common names that were found in the kenyan_papers_details.txt
# file. People with the name Van and Wang were the most common followed by
# Otieno, Zhang, Bukusi and Cheng. These were determined by the wordcloud
# application using the file authorList2000papers.txt [here](
# https://gettingappy.shinyapps.io/wordcloudunigrams/). In the image the size
# of the word corresponds to the frequency of the text.
# ![The wordcloud and how the parameters were set](plots/wordcloud_result.png)

# ?pd.read_csv

# load in the dataframe and give the columns names
df2 = pd.read_csv("PMID_PMC_Journal_Year.txt",delimiter="\t",names=["pmid","pmd","journal","year"])

# no rows are lost
# pandas loads in an all or nothing approach
df2.shape

# concise summary of the dataset
df2.info()

# +
# Use R programming language in the notebook
# https://www.linkedin.com/pulse/interfacing-r-from-python-3-jupyter-notebook-jared-stufft/
# import rpy2.rinterface
# # %load_ext rpy2.ipython

# +
# # %%R -i df2
# head(df2)
# -

# convert the txt file into a csv for visualization in R
df2.to_csv("PMID_PMC_Journal_Year.csv")
Notebooks/data-import-NCBI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd from pathlib import Path from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix # # Hypothesis: # # I guess that the logistic regression will be the best model. train_df = pd.read_csv(Path('Resources/2019loans.csv')) test_df = pd.read_csv(Path('Resources/2020Q1loans.csv')) train_df.head() # Drop redundant columns to create X training data, remove target column X_train = train_df.drop(['Unnamed: 0', 'index', 'loan_status'], axis=1) X_train # get dummy the x data the entire dataframe X_dummies_train = pd.get_dummies(X_train) print(X_dummies_train.columns) X_dummies_train # loan status is the target y_train = train_df['loan_status'] y_train y_train_label = LabelEncoder().fit_transform(train_df['loan_status']) # Drop redundant columns to create X training data, remove target column X_test = test_df.drop(['Unnamed: 0', 'index', 'loan_status'], axis=1) X_test # get dummy the x test data the entire dataframe X_dummies_test = pd.get_dummies(X_test) print(X_dummies_test.columns) X_dummies_test # add missing dummy variables to testing set for col in X_dummies_train.columns: if col not in X_dummies_test.columns: X_dummies_test[col] = 0 # + # loan status is the target y_test = test_df['loan_status'] y_test # Do I need to convert the categorical data to numeric? 
# -
y_test_label = LabelEncoder().fit_transform(test_df['loan_status'])

# Align the test dummies with the training dummies.  Adding the missing
# columns above appends them at the END of the test frame, so without an
# explicit reindex the train/test column ORDER can differ and sklearn
# (which matches features by position, not by name) would silently train
# and score on misaligned data.
X_dummies_test = X_dummies_test[X_dummies_train.columns]

# Train the Logistic Regression model on the unscaled data and print the model score
from sklearn.linear_model import LogisticRegression

classifier = LogisticRegression()
# BUG FIX: the model was previously fit on X_train, which still contains the
# raw categorical (object) columns and is NOT the matrix scored below.
# Fit on the one-hot encoded X_dummies_train so fitting and scoring use the
# same feature matrix.
classifier.fit(X_dummies_train, y_train_label)
print(f"Training Data Score: {classifier.score(X_dummies_train, y_train_label)}")
print(f"Testing Data Score: {classifier.score(X_dummies_test, y_test_label)}")

# Train a Random Forest Classifier model and print the model score
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(random_state=1).fit(X_dummies_train, y_train)
print(f'Training Score: {clf.score(X_dummies_train, y_train)}')
print(f'Testing Score: {clf.score(X_dummies_test, y_test)}')

# Scale the data and rerun the models.  The scaler is fit on the TRAINING
# set only and then applied to both sets, which avoids leaking test-set
# statistics into training.
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(X_dummies_train)
X_train_scaled = scaler.transform(X_dummies_train)
X_test_scaled = scaler.transform(X_dummies_test)

# Train the Logistic Regression model on the scaled data and print the model score
clf = LogisticRegression().fit(X_train_scaled, y_train)
print(f'Training Score: {clf.score(X_train_scaled, y_train)}')
print(f'Testing Score: {clf.score(X_test_scaled, y_test)}')

# Side-by-side comparison of the unscaled logistic model's predictions
# against the actual (label-encoded) test targets.
predictions = classifier.predict(X_dummies_test)
pd.DataFrame({"Prediction": predictions, "Actual": y_test_label})

# +
# Train a Random Forest Classifier model on the scaled data and print the model score
clf = RandomForestClassifier(random_state=1).fit(X_train_scaled, y_train)
print(f'Training Score: {clf.score(X_train_scaled, y_train)}')
print(f'Testing Score: {clf.score(X_test_scaled, y_test)}')

# How do I assess which model performed better? Do I need to use a confusion matrix and assess multiple elements?
# -

# # The best model is logistic regression
# I would say that my hypothesis was correct, because the testing score for logistic regression on the scaled data is highest.
# The potential ranges for the individual features of the unscaled data are mitigated on the scaled data.
Credit Risk Evaluator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false} from collections import Counter, defaultdict from functools import reduce import numpy as np # implement PMI baseline for unsupervised parsing tree generation train = None val = None test = None linux_data_path = "/home/zijiao/work/data/mscoco/train_caps.txt" mac_data_path = "/Users/zijiaoyang/Documents/data/mscoco/train_caps.txt" file_path = "/Users/zijiaoyang/Documents/data/mscoco" # + pycharm={"is_executing": false, "name": "#%%\n"} # 413915 captions so image is supposed to be 82783 with open(mac_data_path, 'r') as f: words_doc = [] bigram_doc = [] sentences = [] for line in f: #sentence = tokenizer(line.strip()) #sentence = ['<s>'] + sentence + ['</s>'] sentence = line.strip().lower().split() sentences.append(sentence) bigram_doc.extend(list(zip(sentence, sentence[1:]))) words_doc.extend(sentence) # + class Vocabulary(object): """Simple vocabulary wrapper.""" def __init__(self): self.word2idx = {} self.idx2word = {} self.idx = 0 def add_word(self, word): if word not in self.word2idx: self.word2idx[word] = self.idx self.idx2word[self.idx] = word self.idx += 1 def __call__(self, word): if word not in self.word2idx: return self.word2idx['<unk>'] return self.word2idx[word] def __len__(self): return len(self.word2idx) import pickle with open("../data/mscoco/vocab.pkl", 'rb') as f: vocab = pickle.load(f) # - # compute dicts word2count = defaultdict(lambda: 0) bigram2count = defaultdict(lambda: 0) for word in words_doc: if vocab(word) == '<unk>': word2count[vocab('<unk>')] += 1 word2count[vocab(word)] += 1 for w1, w2 in bigram_doc: if vocab(w1) == '<unk>' or vocab(w2) == '<unk>': word2count[vocab('<unk>'), vocab('<unk>')] += 1 bigram2count[(vocab(w1), vocab(w2))] += 1 # + pycharm={"is_executing": false, "name": 
"#%%\n"} # Compute probs total_wdcounts = reduce(lambda a, b: a+b, list(word2count.values())) p_uni = {word: count/total_wdcounts for word, count in word2count.items()} total_bicounts = reduce(lambda a, b: a+b, list(bigram2count.values())) p_bi = {bigram: count/total_bicounts for bigram, count in bigram2count.items()} # + pycharm={"is_executing": false, "name": "#%%\n"} def pmi(word1, word2, p_uni=p_uni, p_bi=p_bi, smooth=.7): """ Compute Negtive pointwise mutual information # add 1 smoothing """ word1, word2 = vocab(word1), vocab(word2) return np.minimum(np.log(p_bi.get((word1, word2), 0)+smooth/(p_uni.get(word1, 0) + smooth) * (p_uni.get(word2, 0) +smooth)), 0) def parse(distance, left, right): """ Compute the paring boundary based on given syntactic distance Input: distances computed for a sentence, left and right are boundaries :return: boundaries """ if left == right: return [] #print(left, right) p = left + np.argmax(distance[left: right]) return [(left, right)] + parse(distance, left, p) + parse(distance, p+1, right) # + pycharm={"is_executing": false} # Compute spans for tree in data_path def compute_npmi(data_path, sm=.7): with open(data_path) as f: sent_distances = [] for line in f: # original code line.strip().lower().split() we used tokenizer here #sentence = tokenizer(line.strip().lower()) sentence = line.strip().lower().split() bis = zip(sentence, sentence[1:]) # Compute negative pointwise mutual info dist = [pmi(word1, word2,smooth=sm) for word1, word2 in bis] sent_distances.append(dist) #sent_distances.append((sentence, dist)) return sent_distances # + pycharm={"is_executing": false, "name": "#%%p\n"} # generate test dists import os import numpy as np import pickle data_path = '/Users/zijiaoyang/Documents/data/mscoco/' bras = [] for sm in np.linspace(0.1, 5, num=20): sent_distances = compute_npmi(os.path.join(data_path, 'test_caps.txt'), sm=sm) brackets = [parse(dis, 0, len(dis)-1) for dis in sent_distances] bras.append((sm, brackets)) # + 
# pycharm={"is_executing": false, "name": "#%%\n"}
# TODO: compute f1 score for pmi baseline
# TODO: solve possible OOV problem, partly solved
# TODO: make data preprocssing same as original code, so fair compare can be made: DONE

# + pycharm={"is_executing": false, "name": "#%%\n"}
import argparse
import os

#from evaluation import test_trees
#from vocab import Vocabulary


def extract_spans(tree):
    """Convert a bracketed tree string into constituent spans.

    ``tree`` is a space-separated bracketing such as ``"( ( a b ) c )"``.
    Returns the list of ``(left, right)`` word-index spans of every internal
    constituent, in the order their closing brackets appear.
    """
    answer = list()
    stack = list()
    items = tree.split()
    curr_index = 0
    for item in items:
        if item == ')':
            # Close the most recent open bracket: pop everything back to the
            # matching '(' and collapse it into a single merged span.
            pos = -1
            right_margin = stack[pos][1]
            left_margin = None
            while stack[pos] != '(':
                left_margin = stack[pos][0]
                pos -= 1
            assert left_margin is not None
            assert right_margin is not None
            stack = stack[:pos] + [(left_margin, right_margin)]
            answer.append((left_margin, right_margin))
        elif item == '(':
            stack.append(item)
        else:
            # A word: it covers exactly its own index.
            stack.append((curr_index, curr_index))
            curr_index += 1
    return answer


def extract_statistics(gold_tree_spans, produced_tree_spans):
    """Count span matches between a gold tree and a produced tree.

    Returns ``(precision_cnt, precision_denom, recall_cnt, recall_denom)``:
    the number of produced spans found in the gold tree, the number of
    produced spans, the number of gold spans found in the produced tree,
    and the number of gold spans.
    """
    gold_tree_spans = set(gold_tree_spans)
    produced_tree_spans = set(produced_tree_spans)
    precision_cnt = sum(list(map(lambda span: 1.0 if span in gold_tree_spans else 0.0,
                                 produced_tree_spans)))
    recall_cnt = sum(list(map(lambda span: 1.0 if span in produced_tree_spans else 0.0,
                              gold_tree_spans)))
    precision_denom = len(produced_tree_spans)
    recall_denom = len(gold_tree_spans)
    return precision_cnt, precision_denom, recall_cnt, recall_denom


def f1_score(produced_trees, gold_trees):
    """Corpus-level bracketing F1 (percent) of produced trees vs. gold trees.

    ``produced_trees`` is a list of span lists (as from :func:`parse`);
    ``gold_trees`` is a list of bracketed tree strings.
    Returns ``(f1, precision, recall)``, each in [0, 100].
    """
    gold_trees = list(map(lambda tree: extract_spans(tree), gold_trees))
    # TODO: get spans from pmi baseline, $$DONE
    assert len(produced_trees) == len(gold_trees)
    precision_cnt, precision_denom, recall_cnt, recall_denom = 0, 0, 0, 0
    for i, item in enumerate(produced_trees):
        pc, pd, rc, rd = extract_statistics(gold_trees[i], item)
        precision_cnt += pc
        precision_denom += pd
        recall_cnt += rc
        recall_denom += rd
    # BUG FIX: guard the divisions so empty span sets report 0.0 instead of
    # raising ZeroDivisionError (e.g. single-word sentences / empty parses).
    precision = float(precision_cnt) / precision_denom * 100.0 if precision_denom else 0.0
    recall = float(recall_cnt) / recall_denom * 100.0 if recall_denom else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return f1, precision, recall


# parser = argparse.ArgumentParser()
# parser.add_argument('--candidate', type=str, required=False,
#                     help='model path to evaluate')
# parser.add_argument('--produced_path', required=True, default='./',
#                     help='the path to produced_tree_spans')
# args = parser.parse_args()

if __name__ == "__main__":
    # BUG FIX: this evaluation previously ran at import time with hardcoded
    # absolute paths; guard it so the module can be imported for its
    # functions without touching the filesystem.
    #ground_truth = [line.strip() for line in open(
    #    os.path.join('/home/zijiao/work/data/mscoco/', 'test_ground-truth.txt'))]
    ground_truth = [line.strip() for line in open(
        os.path.join('/Users/zijiaoyang/Documents/data/mscoco/', 'test_ground-truth.txt'))]

    # `bras` (smoothing value, bracket lists) is produced by an earlier
    # notebook cell; this sweep reports F1 for each smoothing setting.
    for sm, trees in bras:  # noqa: F821
        f1, precision, recall = f1_score(trees, ground_truth)
        print(f'sm is {sm:.2f}')
        print(f'F1 score: {f1:.2f}, precision: {precision:.2f}, recall: {recall:.2f}')
    # TODO: check if it works, it worked......
    # TODO: generate tree file for test:DONE
    # # ! change vocab to default

    with open('/Users/zijiaoyang/Documents/data/mscoco/test_caps.txt', 'r') as f:
        sents = []
        for line in f:
            sents.append(line.strip().lower())
    sents[1]
pmi_baseline.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Mixing TensorFlow models with GPflow # # This notebook explores the combination of Keras TensorFlow neural networks with GPflow models. # %% import numpy as np import tensorflow as tf from matplotlib import pyplot as plt import gpflow from gpflow.ci_utils import ci_niter from scipy.cluster.vq import kmeans2 from typing import Dict, Optional, Tuple import tensorflow as tf import tensorflow_datasets as tfds import gpflow from gpflow.utilities import to_default_float iterations = ci_niter(100) # %% [markdown] # ## Convolutional network inside a GPflow model # %% original_dataset, info = tfds.load(name="mnist", split=tfds.Split.TRAIN, with_info=True) total_num_data = info.splits["train"].num_examples image_shape = info.features["image"].shape image_size = tf.reduce_prod(image_shape) batch_size = 32 def map_fn(input_slice: Dict[str, tf.Tensor]): updated = input_slice image = to_default_float(updated["image"]) / 255.0 label = to_default_float(updated["label"]) return tf.reshape(image, [-1, image_size]), label autotune = tf.data.experimental.AUTOTUNE dataset = ( original_dataset.shuffle(1024) .batch(batch_size, drop_remainder=True) .map(map_fn, num_parallel_calls=autotune) .prefetch(autotune) .repeat() ) # %% [markdown] # Here we'll use the GPflow functionality, but put a non-GPflow model inside the kernel.\ # Vanilla ConvNet. 
This gets 97.3% accuracy on MNIST when used on its own (+ final linear layer) after 20K iterations # %% class KernelWithConvNN(gpflow.kernels.Kernel): def __init__( self, image_shape: Tuple, output_dim: int, base_kernel: gpflow.kernels.Kernel, batch_size: Optional[int] = None, ): super().__init__() with self.name_scope: self.base_kernel = base_kernel input_size = int(tf.reduce_prod(image_shape)) input_shape = (input_size,) self.cnn = tf.keras.Sequential( [ tf.keras.layers.InputLayer(input_shape=input_shape, batch_size=batch_size), tf.keras.layers.Reshape(image_shape), tf.keras.layers.Conv2D( filters=32, kernel_size=image_shape[:-1], padding="same", activation="relu" ), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2), tf.keras.layers.Conv2D( filters=64, kernel_size=(5, 5), padding="same", activation="relu" ), tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(output_dim, activation="relu"), tf.keras.layers.Lambda(to_default_float), ] ) self.cnn.build() def K(self, a_input: tf.Tensor, b_input: Optional[tf.Tensor] = None) -> tf.Tensor: transformed_a = self.cnn(a_input) transformed_b = self.cnn(b_input) if b_input is not None else b_input return self.base_kernel.K(transformed_a, transformed_b) def K_diag(self, a_input: tf.Tensor) -> tf.Tensor: transformed_a = self.cnn(a_input) return self.base_kernel.K_diag(transformed_a) # %% [markdown] # $K_{uf}$ is in ConvNN output space, therefore we need to update `Kuf` multidispatch. 
# %% class KernelSpaceInducingPoints(gpflow.inducing_variables.InducingPoints): pass @gpflow.covariances.Kuu.register(KernelSpaceInducingPoints, KernelWithConvNN) def Kuu(inducing_variable, kernel, jitter=None): func = gpflow.covariances.Kuu.dispatch( gpflow.inducing_variables.InducingPoints, gpflow.kernels.Kernel ) return func(inducing_variable, kernel.base_kernel, jitter=jitter) @gpflow.covariances.Kuf.register(KernelSpaceInducingPoints, KernelWithConvNN, object) def Kuf(inducing_variable, kernel, a_input): return kernel.base_kernel(inducing_variable.Z, kernel.cnn(a_input)) # %% [markdown] # Now we are ready to create and initialize the model: # %% num_mnist_classes = 10 output_dim = 5 num_inducing_points = 100 images_subset, labels_subset = next(iter(dataset.batch(32))) images_subset = tf.reshape(images_subset, [-1, image_size]) labels_subset = tf.reshape(labels_subset, [-1, 1]) kernel = KernelWithConvNN( image_shape, output_dim, gpflow.kernels.SquaredExponential(), batch_size=batch_size ) likelihood = gpflow.likelihoods.MultiClass(num_mnist_classes) inducing_variable_kmeans = kmeans2(images_subset.numpy(), num_inducing_points, minit="points")[0] inducing_variable_cnn = kernel.cnn(inducing_variable_kmeans) inducing_variable = KernelSpaceInducingPoints(inducing_variable_cnn) model = gpflow.models.SVGP( kernel, likelihood, inducing_variable=inducing_variable, num_data=total_num_data, num_latent_gps=num_mnist_classes, ) # %% [markdown] # And start optimization: # %% data_iterator = iter(dataset) adam_opt = tf.optimizers.Adam(0.001) training_loss = model.training_loss_closure(data_iterator) @tf.function def optimization_step(): adam_opt.minimize(training_loss, var_list=model.trainable_variables) for _ in range(iterations): optimization_step() # %% [markdown] # Let's do predictions after training. Don't expect that we will get a good accuracy, because we haven't run training for long enough. 
# %% m, v = model.predict_y(images_subset) preds = np.argmax(m, 1).reshape(labels_subset.numpy().shape) correct = preds == labels_subset.numpy().astype(int) acc = np.average(correct.astype(float)) * 100.0 print("Accuracy is {:.4f}%".format(acc))
doc/source/notebooks/tailor/gp_nn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # NUMPY Tutorial
#
# https://docs.scipy.org/doc/numpy-dev/,
# https://www.python-course.eu/numpy.php/
#
# ## What is NUMPY?
# NumPy is an acronym for "Numeric Python" or "Numerical Python"
# A Python package that supports the creation and manipulation of n-dimensional arrays (ndarray) of homogeneous data types.
# NumPy objects are fixed at creation; adjusting the size creates a new object.
# The objects in the array must all have the same type (you can defeat this somewhat in that if you create a NumPy array of lists, the lists themselves can contain different objects).
#
# This means that mathematical operations can happen quickly since the size and type of the array elements are fixed.
# NumPy was designed for using large arrays and matrices.
# SciPy (Scientific Python) extends the use of the NumPy ndarrays with advanced functions: minimization, regression, Fourier transformation, ...
# # # ## Setting up # # If you already have python and pip installed the use: # # pip install numpy scipy # # Or follow the intstructions from http://www.numpy.org/ or https://docs.scipy.org/doc/numpy-dev/user/index.html#user # # ## Using NUMBPY # Import it to use it #import numbpy import numpy as np # One dimensional array cvalues = [25.3, 24.8, 26.9, 23.9] c=np.array(cvalues) print (c) # Assume they are celsius measurements that we want to convert to Fahrenheit # List expression evaluation print "list expression result="+str([ x*9/5+32 for x in cvalues] ) print "numbpy broadcast result="+str(c*9/5+32) # simpler syntax # ## Creating ranges using NUMBPY # # Python ranges # range([start], stop[, step]) # vs xrange # Renames xrange() to range() and wraps existing range() calls with list # # # np.arange([start,] stop[, step,], dtype=None) the stop value is not include in the numbers returned. Default step is one, the dtype is based on the start number format unless specified in dtype. range(1,15,2) # operates on integers only np.arange(-1.5,15,3.1415) # can create any sequence of numbers np.arange((1.0-2.5j),50,(1-4j)) #Even complex number sequences # ## NUMPY objects # https://docs.scipy.org/doc/numpy-dev/reference/index.html#reference # # 3. Array objects # # a. N-dimensional array # https://docs.scipy.org/doc/numpy-dev/reference/arrays.ndarray.html # # b. scalars # https://docs.scipy.org/doc/numpy-dev/reference/arrays.scalars.html # # c. indexing # https://docs.scipy.org/doc/numpy-dev/reference/arrays.indexing.html # # d. interating over arrays # https://docs.scipy.org/doc/numpy-dev/reference/arrays.nditer.html # # e. Standard array subclasses # https://docs.scipy.org/doc/numpy-dev/reference/arrays.classes.html # # f. the array interface # https://docs.scipy.org/doc/numpy-dev/reference/arrays.interface.html # # g. datetimes and timedeltas # https://docs.scipy.org/doc/numpy-dev/reference/arrays.datetime.html # # # ## NUMPY Universal functions (ufunc) # a. 
broadcasting
# b. output type
# c. internal buffers
# d. error handling
# e. casting rules
# f. overriding ufunc
# g. available ufuncs
# ## NUMPY Routines
# a.
Lectures/Week 3 - Data aquisition and clean up -numpy_and_pandas/Underconstruction/NUMPY Tutorial-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numbers import os import random import pandas as pd import numpy as np import tensorflow as tf from tensorflow.contrib import layers from tensorflow.python.framework import ops, tensor_shape, tensor_util from tensorflow.python.ops import math_ops, random_ops, array_ops from tensorflow.python.layers import utils from sklearn.cross_validation import train_test_split import time from sklearn.utils import shuffle # load data df = pd.read_csv("./data/model_test.csv", encoding='utf-8') df = df.drop(['Unnamed: 0'], axis=1) df.tail() df = df[df.result != 0] # define X and y X = df.ix[:, 3:] y = pd.get_dummies(df.result) # + for feature in X.columns: X[feature] = (X[feature] - X[feature].mean())/X[feature].std() print("Averages") print(X.mean()) print("\n Deviations") print(pow(X.std(), 2)) # + # Generate Traning and Validation Sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # Convert to np arrays so that we can use with Tensorflow X_train = np.array(X_train).astype(np.float32) X_test = np.array(X_test).astype(np.float32) y_train = np.array(y_train).astype(np.float32) y_test = np.array(y_test).astype(np.float32) print(np.shape(X_train), np.shape(y_train)) print(np.shape(X_test), np.shape(y_test)) # + # Define parameters for the model learning_rate = 0.01 batch_size = 33 n_epochs = 10000 # Create placeholders for features and labels X = tf.placeholder(tf.float32, [None, 14], name='X_placeholder') Y = tf.placeholder(tf.float32, [None, 2], name='Y_placeholder') # Create weights and bias w = tf.Variable(tf.random_normal(shape=[14, 2], stddev=0.01), name='weights') b = tf.Variable(tf.zeros([1, 2], name='bias')) # Build model that returns the logits logits = tf.matmul(X, w) + b # Define log loss function ==> cross entropy of 
softmax of logits y = tf.nn.softmax(logits) loss = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(y), reduction_indices=[1])) # + optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) n_batches = int(len(X_train)/batch_size) for i in range(n_epochs): # train the model # shuffle X, y X_train, y_train = shuffle(X_train, y_train) total_loss = 0 for j in range(n_batches): X_batch, Y_batch = X_train[j*batch_size:(j+1)*batch_size], y_train[j*batch_size:(j+1)*batch_size] _, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y:Y_batch}) total_loss += loss_batch if i % 1000 == 0: print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches)) print('Optimization Finished') # + training_size = X_train.shape[1] test_size = X_test.shape[1] num_features = 14 num_labels = 2 num_hidden = 7 graph = tf.Graph() with graph.as_default(): tf_train_set = tf.constant(X_train) tf_train_labels = tf.constant(y_train) tf_valid_set = tf.constant(X_test) print(tf_train_set) print(tf_train_labels) ## Note, since there is only 1 layer there are actually no hidden layers... 
but if there were ## there would be num_hidden weights_1 = tf.Variable(tf.truncated_normal([num_features, num_hidden])) weights_2 = tf.Variable(tf.truncated_normal([num_hidden, num_labels])) ## tf.zeros Automaticaly adjusts rows to input data batch size bias_1 = tf.Variable(tf.zeros([num_hidden])) bias_2 = tf.Variable(tf.zeros([num_labels])) logits_1 = tf.matmul(tf_train_set , weights_1 ) + bias_1 rel_1 = tf.nn.relu(logits_1) logits_2 = tf.matmul(rel_1, weights_2) + bias_2 loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_2, labels=tf_train_labels)) optimizer = tf.train.GradientDescentOptimizer(.005).minimize(loss) ## Training prediction predict_train = tf.nn.softmax(logits_2) # Validation prediction logits_1_val = tf.matmul(tf_valid_set, weights_1) + bias_1 rel_1_val = tf.nn.softmax(logits_1_val) logits_2_val = tf.matmul(rel_1_val, weights_2) + bias_2 predict_valid = tf.nn.softmax(logits_2_val) # - def accuracy(predictions, labels): return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]) num_steps = 100000 with tf.Session(graph = graph) as session: tf.global_variables_initializer().run() print(loss.eval()) test_accuracy = [] for step in range(num_steps): _,l, predictions = session.run([optimizer, loss, predict_train]) test_accuracy.append(accuracy(predict_valid.eval(), y_test)) if (step % 10000 == 0 or step == num_steps-1): # print(predictions[3:6]) print('Loss at step %d: %f' % (step, l)) print('Training accuracy: %.1f%%' % accuracy(predictions, y_train[:, :])) print('Testing accuracy: %.1f%%' % accuracy(predict_valid.eval(), y_test)) np.max(test_accuracy)
04_modeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 32-bit
#     metadata:
#       interpreter:
#         hash: 5796a2551c486a61ea09e71a43fb2f5e10e06fe2450aa13ced50ae80b2f5630b
#     name: python3
# ---

# +
# Import dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pymongo
import pandas as pd
import time
import requests
import os
# -

# Chrome driver
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)

# MARS NEWS: load the NASA news page and parse it
news_url = 'https://mars.nasa.gov/news/'
browser.visit(news_url)
html = browser.html
news_soup = bs(html, 'html.parser')

# MARS NEWS: Title and Paragraph (first article on the page)
news_title = news_soup.find_all('div', class_='content_title')[0].text
news_p = news_soup.find_all('div', class_='article_teaser_body')[0].text
print(news_title)
print(news_p)

# MARS IMAGE: scrape the featured image URL from the JPL space-images page
image_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(image_url)
html = browser.html
image_soup = bs(html, 'html.parser')
section = image_soup.find('div', class_="default floating_text_area ms-layer")
featured_image = section.find('footer')
featured_image_url = 'https://www.jpl.nasa.gov' + featured_image.find('a')['data-fancybox-href']
print(str(featured_image_url))

# MARS FACTS: read the facts table into a DataFrame and render it as HTML
facts_url = "https://space-facts.com/mars/"
tables = pd.read_html(facts_url)
facts_table = tables[0]
facts_table.columns = ['Description', 'Mars']
# BUG FIX: DataFrame.set_index returns a NEW frame (inplace defaults to
# False); the original discarded the result, so the index was never set.
facts_table = facts_table.set_index('Description')
html_table = facts_table.to_html()
# BUG FIX: str.replace returns a new string; the original discarded the
# result, so the newlines were never stripped from the HTML.
html_table = html_table.replace('\n', '')
print(html_table)

# MARS HEMISPHERES: collect title + full-resolution image URL for each hemisphere
home_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(home_url)
html = browser.html
home_soup = bs(html, 'html.parser')

# +
base_url = 'https://astrogeology.usgs.gov/'
page_list = home_soup.find_all('div', class_='item')
links = []
hemisphere_image_urls = []
# Build the absolute link to each hemisphere's detail page
for page in page_list:
    href = page.find('a', class_='itemLink product-item')
    link = base_url + href['href']
    links.append(link)

time.sleep(1)

# Visit each detail page and pull out the title and the download image URL
for link in links:
    hemisphere_dict = {}
    browser.visit(link)
    html = browser.html
    page_soup = bs(html, 'lxml')
    title_block = page_soup.find('div', class_='content')
    title = title_block.find('h2', class_='title').text
    hemisphere_dict["title"] = title
    img_block = page_soup.find('div', class_='downloads')
    img = img_block.find('a')['href']
    hemisphere_dict['img_url'] = img
    hemisphere_image_urls.append(hemisphere_dict)
# -

print(hemisphere_image_urls)

# +
# MARS DICTIONARY: bundle all scraped results for downstream use (e.g. MongoDB)
mars_dict = {
    "news_title": news_title,
    "news_p": news_p,
    "featured_image_url": featured_image_url,
    "html_table": str(html_table),
    "hemisphere_images": hemisphere_image_urls}

mars_dict
# -
Mission_to_Mars/mission_to_mars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/iamfaith/DeepLearning/blob/master/bitcoin.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="OL2tyBxp2E4j" colab_type="code" outputId="1ee70999-cbe3-4a28-c4c0-75c1e3227451" colab={"base_uri": "https://localhost:8080/", "height": 292} import pandas as pd import time import seaborn as sns import matplotlib.pyplot as plt import datetime import numpy as np # get market info for bitcoin from the start of 2016 to the current day bitcoin_market_info = pd.read_html("https://coinmarketcap.com/currencies/bitcoin/historical-data/?start=20130428&end="+time.strftime("%Y%m%d"))[2] # print(bitcoin_market_info, time.strftime("%Y%m%d")) # print('------') # print(type(bitcoin_market_info[0]), bitcoin_market_info[1]['Date']) # convert the date string to the correct date format bitcoin_market_info = bitcoin_market_info.assign(Date=pd.to_datetime(bitcoin_market_info['Date'])) # when Volume is equal to '-' convert it to 0 bitcoin_market_info.loc[bitcoin_market_info['Volume']=="-",'Volume']=0 # convert to int bitcoin_market_info['Volume'] = bitcoin_market_info['Volume'].astype('int64') # sometime after publication of the blog, coinmarketcap starting returning asterisks in the column names # this will remove those asterisks bitcoin_market_info.columns = bitcoin_market_info.columns.str.replace("*", "") # look at the first few rows bitcoin_market_info.head() # + id="zKz_TfbNZfsV" colab_type="code" outputId="2a42d33e-87fb-4f17-edee-aba37f70a2c6" colab={"base_uri": "https://localhost:8080/", "height": 323} 
pd.read_html("https://coinmarketcap.com/currencies/bitcoin/historical-data/?start=20130428&end="+time.strftime("%Y%m%d")) # + id="LJEsIitH2ICv" colab_type="code" outputId="f6a12ee3-bc1a-41f7-c2a3-90693bc9448c" colab={"base_uri": "https://localhost:8080/", "height": 297} bitcoin_market_info.describe() # + id="OWreebjo5NGA" colab_type="code" outputId="10b23d3f-3fd4-4ece-bc5f-242b86568f46" colab={"base_uri": "https://localhost:8080/", "height": 204} # get market info for ethereum from the start of 2016 to the current day eth_market_info = pd.read_html("https://coinmarketcap.com/currencies/ethereum/historical-data/?start=20130428&end="+time.strftime("%Y%m%d"))[2] # convert the date string to the correct date format eth_market_info = eth_market_info.assign(Date=pd.to_datetime(eth_market_info['Date'])) # sometime after publication of the blog, coinmarketcap starting returning asterisks in the column names # this will remove those asterisks eth_market_info.columns = eth_market_info.columns.str.replace("*", "") # look at the first few rows eth_market_info.head() # + id="x_h1O0j28oHO" colab_type="code" outputId="512e606d-ea92-4a05-c0fd-33d747475206" colab={"base_uri": "https://localhost:8080/", "height": 297} eth_market_info.describe() # + id="92QfM1dL52i1" colab_type="code" colab={} # getting the Bitcoin and Eth logos import sys from PIL import Image import io if sys.version_info[0] < 3: import urllib2 as urllib bt_img = urllib.urlopen("http://logok.org/wp-content/uploads/2016/10/Bitcoin-Logo-640x480.png") eth_img = urllib.urlopen("https://upload.wikimedia.org/wikipedia/commons/thumb/0/05/Ethereum_logo_2014.svg/256px-Ethereum_logo_2014.svg.png") else: import urllib bt_img = urllib.request.urlopen("http://logok.org/wp-content/uploads/2016/10/Bitcoin-Logo-640x480.png") eth_img = urllib.request.urlopen("https://upload.wikimedia.org/wikipedia/commons/thumb/0/05/Ethereum_logo_2014.svg/256px-Ethereum_logo_2014.svg.png") image_file = io.BytesIO(bt_img.read()) bitcoin_im = 
Image.open(image_file) image_file = io.BytesIO(eth_img.read()) eth_im = Image.open(image_file) width_eth_im , height_eth_im = eth_im.size eth_im = eth_im.resize((int(eth_im.size[0]*0.8), int(eth_im.size[1]*0.8)), Image.ANTIALIAS) # + id="A8edrZsQ6Vla" colab_type="code" colab={} bitcoin_market_info.columns =[bitcoin_market_info.columns[0]]+['bt_'+i for i in bitcoin_market_info.columns[1:]] eth_market_info.columns =[eth_market_info.columns[0]]+['eth_'+i for i in eth_market_info.columns[1:]] # + id="WhZ5fixY6nIY" colab_type="code" outputId="9097044e-ac26-45ad-eac8-18edfca756fc" colab={"base_uri": "https://localhost:8080/", "height": 68} bitcoin_market_info.columns eth_market_info.columns # + id="Uqvd3IZt6o_1" colab_type="code" outputId="88069167-736a-4cd8-edec-713054296f7f" colab={"base_uri": "https://localhost:8080/", "height": 729} # fig= plt.figure(figsize=(6,3)) fig, (ax1, ax2) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[3, 1]}, figsize=(15,10)) ax1.set_ylabel('Closing Price ($)',fontsize=12) ax2.set_ylabel('Volume ($ bn)',fontsize=12) ax2.set_yticks([int('%d000000000'%i) for i in range(10)]) ax2.set_yticklabels(range(10)) ax1.set_xticks([datetime.date(i,j,1) for i in range(2013,2020) for j in [1,7]]) ax1.set_xticklabels('') ax2.set_xticks([datetime.date(i,j,1) for i in range(2013,2020) for j in [1,7]]) ax2.set_xticklabels([datetime.date(i,j,1).strftime('%b %Y') for i in range(2013,2020) for j in [1,7]]) ax1.plot(bitcoin_market_info['Date'],bitcoin_market_info['bt_Open']) ax2.bar(bitcoin_market_info['Date'], bitcoin_market_info['bt_Volume'].values) fig.tight_layout() fig.figimage(bitcoin_im, 100, 120, zorder=3,alpha=.5) plt.show() # + id="0CUORWSZ6rqm" colab_type="code" outputId="2622e139-ace7-4b66-e514-17884944ca32" colab={"base_uri": "https://localhost:8080/", "height": 729} fig, (ax1, ax2) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[3, 1]}, figsize=(15,10)) #ax1.set_yscale('log') ax1.set_ylabel('Closing Price ($)',fontsize=12) 
ax2.set_ylabel('Volume ($ bn)',fontsize=12) ax2.set_yticks([int('%d000000000'%i) for i in range(10)]) ax2.set_yticklabels(range(10)) ax1.set_xticks([datetime.date(i,j,1) for i in range(2013,2020) for j in [1,7]]) ax1.set_xticklabels('') ax2.set_xticks([datetime.date(i,j,1) for i in range(2013,2020) for j in [1,7]]) ax2.set_xticklabels([datetime.date(i,j,1).strftime('%b %Y') for i in range(2013,2020) for j in [1,7]]) ax1.plot(eth_market_info['Date'],eth_market_info['eth_Open']) ax2.bar(eth_market_info['Date'], eth_market_info['eth_Volume'].values) fig.tight_layout() fig.figimage(eth_im, 300, 180, zorder=3, alpha=.6) plt.show() # + id="HAskJvyZ8wgT" colab_type="code" outputId="0b122c3a-a31d-4d02-a2d5-fe9773ebf3bb" colab={"base_uri": "https://localhost:8080/", "height": 68} eth_market_info['eth_Open'] bitcoin_market_info.columns # + id="egZmHYlx8dgQ" colab_type="code" outputId="d6b34a3b-b5ce-4e26-a62a-843b000bd4e5" colab={"base_uri": "https://localhost:8080/", "height": 564} market_info = pd.merge(bitcoin_market_info,eth_market_info, on=['Date']) market_info = market_info[market_info['Date']>='2016-01-01'] print(market_info) for coins in ['bt_bt_', 'eth_']: kwargs = { coins+'day_diff': lambda x: (x[coins+'Close']-x[coins+'Open'])/x[coins+'Open']} market_info = market_info.assign(**kwargs) market_info.head() # + id="47sx3kLh9WDK" colab_type="code" outputId="269a4b8f-fe84-4532-b36b-76b9b3233efe" colab={"base_uri": "https://localhost:8080/", "height": 729} split_date = '2019-5-01' fig, (ax1, ax2) = plt.subplots(2,1, figsize=(15,10)) ax1.set_xticks([datetime.date(i,j,1) for i in range(2013,2020) for j in [1,7]]) ax1.set_xticklabels('') ax2.set_xticks([datetime.date(i,j,1) for i in range(2013,2020) for j in [1,7]]) ax2.set_xticklabels([datetime.date(i,j,1).strftime('%b %Y') for i in range(2013,2020) for j in [1,7]]) ax1.plot(market_info[market_info['Date'] < split_date]['Date'], market_info[market_info['Date'] < split_date]['bt_bt_Close'], color='#B08FC7', 
label='Training') ax1.plot(market_info[market_info['Date'] >= split_date]['Date'], market_info[market_info['Date'] >= split_date]['bt_bt_Close'], color='#8FBAC8', label='Test') ax2.plot(market_info[market_info['Date'] < split_date]['Date'], market_info[market_info['Date'] < split_date]['eth_Close'], color='#B08FC7') ax2.plot(market_info[market_info['Date'] >= split_date]['Date'], market_info[market_info['Date'] >= split_date]['eth_Close'], color='#8FBAC8') ax1.set_xticklabels('') ax1.set_ylabel('Bitcoin Price ($)',fontsize=12) ax2.set_ylabel('Ethereum Price ($)',fontsize=12) plt.tight_layout() ax1.legend(bbox_to_anchor=(0.03, 1), loc=2, borderaxespad=0., prop={'size': 14}) fig.figimage(bitcoin_im.resize((int(bitcoin_im.size[0]*0.65), int(bitcoin_im.size[1]*0.65)), Image.ANTIALIAS), 200, 260, zorder=3,alpha=.5) fig.figimage(eth_im.resize((int(eth_im.size[0]*0.65), int(eth_im.size[1]*0.65)), Image.ANTIALIAS), 350, 40, zorder=3,alpha=.5) plt.show() # + id="4JXym38x-JBS" colab_type="code" outputId="7fbcab1d-c88f-40eb-f980-ec56b6ae1e9a" colab={"base_uri": "https://localhost:8080/", "height": 729} # trivial lag model: P_t = P_(t-1) fig, (ax1, ax2) = plt.subplots(2,1, figsize=(15,10)) ax1.set_xticks([datetime.date(2019,i+1,1) for i in range(12)]) ax1.set_xticklabels('') ax2.set_xticks([datetime.date(2019,i+1,1) for i in range(12)]) ax2.set_xticklabels([datetime.date(2019,i+1,1).strftime('%b %d %Y') for i in range(12)]) ax1.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[market_info['Date']>= split_date]['bt_bt_Close'].values, label='Actual') ax1.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[market_info['Date']>= datetime.datetime.strptime(split_date, '%Y-%m-%d') - datetime.timedelta(days=1)]['bt_bt_Close'][1:].values, label='Predicted') ax1.set_ylabel('Bitcoin Price ($)',fontsize=12) ax1.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0., prop={'size': 14}) ax1.set_title('Simple Lag Model (Test Set)') 
ax2.set_ylabel('Etherum Price ($)',fontsize=12) ax2.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[market_info['Date']>= split_date]['eth_Close'].values, label='Actual') ax2.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[market_info['Date']>= datetime.datetime.strptime(split_date, '%Y-%m-%d') - datetime.timedelta(days=1)]['eth_Close'][1:].values, label='Predicted') fig.tight_layout() plt.show() # + id="ojRoeVf5_nC4" colab_type="code" outputId="cf93def3-5ff0-468d-82be-3e56a0bdbdee" colab={"base_uri": "https://localhost:8080/", "height": 607} fig, (ax1, ax2) = plt.subplots(1,2, figsize=(15, 10)) ax1.hist(market_info[market_info['Date']< split_date]['bt_bt_day_diff'].values, bins=100) ax2.hist(market_info[market_info['Date']< split_date]['eth_day_diff'].values, bins=100) ax1.set_title('Bitcoin Daily Price Changes') ax2.set_title('Ethereum Daily Price Changes') plt.show() # + id="Kzhlrdfy_5za" colab_type="code" outputId="00e8c060-5a3a-48fd-dd09-6f5ef2034719" colab={"base_uri": "https://localhost:8080/", "height": 729} np.random.seed(202) bt_r_walk_mean, bt_r_walk_sd = np.mean(market_info[market_info['Date']< split_date]['bt_bt_day_diff'].values), \ np.std(market_info[market_info['Date']< split_date]['bt_bt_day_diff'].values) bt_random_steps = np.random.normal(bt_r_walk_mean, bt_r_walk_sd, (max(market_info['Date']).to_pydatetime() - datetime.datetime.strptime(split_date, '%Y-%m-%d')).days + 1) eth_r_walk_mean, eth_r_walk_sd = np.mean(market_info[market_info['Date']< split_date]['eth_day_diff'].values), \ np.std(market_info[market_info['Date']< split_date]['eth_day_diff'].values) eth_random_steps = np.random.normal(eth_r_walk_mean, eth_r_walk_sd, (max(market_info['Date']).to_pydatetime() - datetime.datetime.strptime(split_date, '%Y-%m-%d')).days + 1) fig, (ax1, ax2) = plt.subplots(2,1, figsize=(15, 10)) ax1.set_xticks([datetime.date(2019,i+1,1) for i in range(12)]) ax1.set_xticklabels('') 
ax2.set_xticks([datetime.date(2019,i+1,1) for i in range(12)]) ax2.set_xticklabels([datetime.date(2019,i+1,1).strftime('%b %d %Y') for i in range(12)]) ax1.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[market_info['Date']>= split_date]['bt_bt_Close'].values, label='Actual') ax1.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[(market_info['Date']+ datetime.timedelta(days=1))>= split_date]['bt_bt_Close'].values[1:] * (1+bt_random_steps), label='Predicted') ax2.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[market_info['Date']>= split_date]['eth_Close'].values, label='Actual') ax2.plot(market_info[market_info['Date']>= split_date]['Date'], market_info[(market_info['Date']+ datetime.timedelta(days=1))>= split_date]['eth_Close'].values[1:] * (1+eth_random_steps), label='Predicted') ax1.set_title('Single Point Random Walk (Test Set)') ax1.set_ylabel('Bitcoin Price ($)',fontsize=12) ax2.set_ylabel('Ethereum Price ($)',fontsize=12) ax1.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0., prop={'size': 14}) plt.tight_layout() plt.show() # + id="YaA9ZtaPBJBt" colab_type="code" colab={}
bitcoin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Sirius Top 100 Model
#
# Experiment to see how the different parameters mimic the recent Top 100
# Springsteen Songs contest on Sirius. Apparently 31,000 listeners selected
# their top 10 songs from a list of 300. This is an attempt to see if this
# can be modeled.

# ## Assumptions
#
# * There is a true top 100 list. We are going to see if voting can re-create the list.
# * The list will be represented by a weighting of 1.300 to 1.001 to represent the
#   order. 1.300 is the number 1 song. The hypothesis is that the subtle weighting
#   in the rank values will carry through to the end list even though a uniform
#   distribution is used to generate random "votes."
#
# ## Parameters
#
# ### Number of votes per voter
# * Votes are ranked
# * Votes are equal
# ### Number of voters

# %matplotlib inline
import random as random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas import Series, DataFrame

# Simulation constants: size of the song catalog, number of voters, and the
# number of ranked picks on each ballot.
MAX_SONG = 300
MAX_VOTERS = 100000
NUM_VOTES = 10

# Draw 10 distinct song weights.
# BUGFIX: the exclusive upper bound is 1301 so that weight 1300 -- the
# "1.300" number-one song described above -- can actually be drawn; the
# original range(1001, 1300) silently excluded it.
random.sample(range(1001, 1301), 10)

# Can I use the ZIP function to multiply the sample by a weight? -- not likely
#
# https://stackoverflow.com/questions/10271484/how-to-perform-element-wise-multiplication-of-two-lists-in-python
#
# In [1]: import numpy as np
#
# In [2]: a = np.array([1,2,3,4])
#
# In [3]: b = np.array([2,3,4,5])
#
# In [4]: a * b
# Out[4]: array([ 2,  6, 12, 20])

# Rank scores: 10 for the top pick down to 1 for the last pick.
list(range(NUM_VOTES, 0, -1))

# Element-wise product of sampled song weights and ballot rank scores.
songs = np.array(random.sample(range(1001, 1301), 10))
rank = np.array(list(range(NUM_VOTES, 0, -1)))
songs * rank

# Should I multiply the song by the score (so the weight applies) or just add the votes?
#
# Both options should be tried.

# Vote tally: one accumulator slot per song.
bsongs = np.zeros(MAX_SONG, dtype=int)
bsongs

# A single ballot: 10 songs sampled from a 30-song subset, scored 10 down to 1.
bsongs = np.zeros(MAX_SONG, dtype=int)
indx = 10
for sng in random.sample(range(1, 31), 10):
    bsongs[sng] = indx
    indx -= 1
bsongs

# 100 voters; afterwards apply the hidden "true rank" weight (1.00-1.30) to
# the tally to produce the weighted leaderboard `tophun`.
bsongs = np.zeros(MAX_SONG, dtype=int)
tophun = np.zeros(MAX_SONG, dtype=int)
for voter in range(100):
    indx = 10
    for sng in random.sample(range(1, 31), 10):
        bsongs[sng] += indx
        indx -= 1
print(bsongs)
for weight in range(0, 31):
    wt = (100 + weight) / 100
    print(wt)
    tophun[weight] = wt * bsongs[weight]
print(tophun)

# Same experiment with 300,000 voters and a finer (1.000-1.030) weighting.
bsongs = np.zeros(MAX_SONG, dtype=int)
tophun = np.zeros(MAX_SONG, dtype=int)
for voter in range(300000):
    indx = 10
    for sng in random.sample(range(1, 31), 10):
        bsongs[sng] += indx
        indx -= 1
print(bsongs)
for weight in range(0, 31):
    wt = (1000 + weight) / 1000
    # print(wt)
    tophun[weight] = wt * bsongs[weight]
print(tophun)

plt.plot(bsongs[:31])

plt.plot(tophun[:31])

# ## More insights
#
# The list of choices are ranked by the voter. When they can see the song list,
# they pick and rank their 10 choices.
#
# In our proxy effort, the higher numbered songs have higher weights. We should
# sort the sample so the numbers are ordered and then assign our votes. Sort
# ranks smallest to highest, so reverse the sort when assigning weights.

bsongs = np.zeros(MAX_SONG, dtype=int)
tophun = np.zeros(MAX_SONG, dtype=int)
for voter in range(100):
    indx = 10
    picks = random.sample(range(1, 31), 10)
    picks.sort()
    # Highest-numbered (most preferred) pick receives the highest rank score.
    for sng in reversed(picks):
        bsongs[sng] += indx
        indx -= 1
print(bsongs)
for weight in range(0, 31):
    wt = (100 + weight) / 100
    # print(wt)
    tophun[weight] = wt * bsongs[weight]
print(tophun)

plt.plot(bsongs[:31])

plt.plot(tophun[:31])

# ## Graphing
#
# Add a way to see the votes graphically.
#
# Following Chapter 5 of 2nd Edition, Python for Data Analysis

MAX_SONG = 300
MAX_VOTERS = 30000
bsongs = np.zeros(MAX_SONG, dtype=int)
tophun = np.zeros(MAX_SONG, dtype=int)
# BUGFIX: range(MAX_VOTERS) gives exactly MAX_VOTERS ballots; the original
# range(1, MAX_VOTERS) silently dropped one voter.
for voter in range(MAX_VOTERS):
    indx = 10
    picks = random.sample(range(1, MAX_SONG), 10)
    picks.sort()
    for sng in reversed(picks):
        bsongs[sng] += indx
        indx -= 1
# print(bsongs[0:MAX_SONG])
for weight in range(0, MAX_SONG):
    wt = (100 + weight) / 100
    # print(wt)
    tophun[weight] = wt * bsongs[weight]

plt.plot(tophun)

plt.plot(bsongs)

# ## What about 1 vote for best song?

MAX_SONG = 300
MAX_VOTERS = 100000
NUM_VOTES = 10
bsongs = np.zeros(MAX_SONG, dtype=int)
tophun = np.zeros(MAX_SONG, dtype=int)
for voter in range(MAX_VOTERS):  # BUGFIX: was range(1, MAX_VOTERS), one voter short
    indx = NUM_VOTES
    picks = random.sample(range(1, MAX_SONG), NUM_VOTES)
    picks.sort()
    for sng in reversed(picks):
        bsongs[sng] += indx
        indx -= 1
# print(bsongs[0:MAX_SONG])
for weight in range(0, MAX_SONG):
    wt = (1000 + weight) / 1000
    # print(wt)
    tophun[weight] = wt * bsongs[weight]

plt.plot(tophun)

plt.plot(bsongs)
Top100.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import pandas as pd import math import numpy as np import matplotlib.pyplot as plt # %matplotlib notebook # + data = pd.read_csv("results.csv", sep=",") x = data.iloc[:, 0] y = data.iloc[:, 1] # print(data) plt.xlabel("time (s)") plt.ylabel("temperature (K)") plt.plot(y) plt.savefig("output.png") # + fig = plt.figure(figsize=(5, 5)) ax = fig.gca() ax.plot(x,y) i0 = 0 i1 = 15 i2 = 30 i3 = len(data)-1 print(i0,i1,i2,i3) fit1 = np.polyfit(x[i0:i1],y[i0:i1],1) line1 = np.poly1d(fit1) y1 = line1(x) fit2 = np.polyfit(x[i2:i3],y[i2:i3],1) line2 = np.poly1d(fit2) y2 = line2(x) ax.plot(x,y1,c='red') ax.plot(x,y2,c='green') plt.show() print(fit1) print(fit2) # - ti = line1(2.5) tf = line2(2.5) print(ti,tf,tf-ti)
week_03_bombCalorimetry/virtualExperimentAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scratch notebook: inspect a saved VGG16 model via the TensorFlow 1.x
# SavedModel loader API, and experiment with image loading/display using
# OpenCV and matplotlib.

# +
# test book to visualise process
#import sys
#print(sys.path)
# -

import tensorflow as tf

# +
num_classes = 2
image_shape = (160, 576)
vgg_path = './data/vgg'
vgg_tag = 'vgg16'

with tf.Session() as sess:
    # load the model and weights from target net and get graph from the files
    tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
    graph = tf.get_default_graph()
    # list every operation name in the loaded graph
    for i in graph.get_operations():
        print(i.name)


# +
# Helpers to list the tensors / tensor names of a graph.
# BUGFIX: the original signatures used graph=tf.get_default_graph() as the
# default argument, which is evaluated once at definition time and therefore
# pins whatever graph was "default" when this cell first ran. Resolving the
# default inside the body picks up the current default graph at call time,
# while calls that pass a graph explicitly behave exactly as before.
def get_names(graph=None):
    """Return the names of every tensor in *graph* (current default graph if None)."""
    if graph is None:
        graph = tf.get_default_graph()
    return [t.name for op in graph.get_operations() for t in op.values()]


def get_tensors(graph=None):
    """Return every tensor object in *graph* (current default graph if None)."""
    if graph is None:
        graph = tf.get_default_graph()
    return [t for op in graph.get_operations() for t in op.values()]


# -

tensor_names = [t.name for op in tf.get_default_graph().get_operations() for t in op.values()]
for q in tensor_names:
    print(q)

# +
import cv2
import matplotlib

# OpenCV reads images as BGR; convert to RGB for display elsewhere.
x = cv2.imread('./learn/peppers.png')
RGB_x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
#cv2.imshow('pic', x)
#cv2.waitKey()

# +
# %matplotlib inline
# The line above is necessary to show Matplotlib's plots inside a Jupyter Notebook
import cv2
from matplotlib import pyplot as plt

# Import image
image = cv2.imread('./learn/peppers.png')
RGB_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Show the image with matplotlib
plt.imshow(RGB_image)
plt.show()

# +
# Same image loaded and shown purely with matplotlib (no OpenCV).
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

image = mpimg.imread('./learn/peppers.png')
plt.imshow(image)
plt.show()
# -
.ipynb_checkpoints/learn-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Green-Ampt infiltration and kinematic wave overland flow
#
# This tutorial shows how to create a simple model of rainfall, infiltration,
# runoff, and overland flow, using two hydrologic components:
# `SoilInfiltrationGreenAmpt` and `KinwaveImplicitOverlandFlow`.
#
# *(<NAME>, September 2021)*

import numpy as np
import matplotlib.pyplot as plt
from landlab import imshow_grid, RasterModelGrid
from landlab.io import read_esri_ascii
from landlab.components import (SoilInfiltrationGreenAmpt,
                                KinwaveImplicitOverlandFlow)

# ## Theory
#
# The Green-Ampt method was introduced by Green and Ampt (1911) as a means of
# approximating the rate of water infiltration into soil from a layer of
# surface water. The method represents infiltration in terms of a wetting
# front that descends into the soil as infiltration progresses. A description
# of the method can be found in many hydrology textbooks, and in various
# online resources. The following is a brief summary, using the notation of
# Julien et al. (1995). The dimensions of each variable are indicated in
# square brackets, using the common convention that [L] means length, [M] is
# mass, and [T] is time.
#
# The Green-Ampt method approximates the rate of water infiltration into the
# soil, $f$ (dimensions of [L/T], representing water volume per unit surface
# area). Infiltration is driven by two effects: gravitational force, and
# downward suction (the "paper towel effect") due to a gradient in moisture
# at the wetting front. The method treats the infiltration rate as a function
# of the following parameters:
#
# - $K$ - saturated hydraulic conductivity [L/T]
# - $H_f$ - capillary pressure head at the wetting front [L]
# - $\phi$ - total soil porosity [-]
# - $\theta_r$ - residual saturation [-]
# - $\theta_e$ - effective porosity $= \phi - \theta_r$ [-]
# - $\theta_i$ - initial soil moisture content [-]
# - $M_d$ - moisture deficit $=\theta_e - \theta_i$ [-]
# - $F$ - total infiltrated water depth [L]
#
# The equation for infiltration rate is:
#
# $$f = K \left( 1 + \frac{H_fM_d}{F} \right)$$
#
# The first term in parentheses represents gravity and the second represents
# pore suction. If there were no pore suction effect, water would simply
# infiltrate downward at a rate equal to the hydraulic conductivity, $K$. The
# suction effect increases this, but it becomes weaker as the cumulative
# infiltration depth $F$ grows. Effectively, the second term approximates the
# pore-pressure gradient, which declines as the wetting front descends.
#
# The version used in this component adds a term for the weight of the surface
# water with depth $H$:
#
# $$f = K \left( 1 + \frac{H_fM_d}{F} + \frac{H}{F} \right)$$
#
# The component uses a simple forward-difference numerical scheme, with time
# step duration $\Delta t$, in which the infiltration depth during one step is
# the lesser of the rate calculated above times $\Delta t$, or the available
# surface water, $H$:
#
# $$\Delta F = \min( f\Delta t, H)$$
#
# Note that the cumulative infiltration $F$ must be greater than zero in order
# to avoid division by zero; therefore, one should initialize the
# `soil_water_infiltration__depth` to a small positive value.

# ## Example
#
# ### Read in topography from a sample DEM
#
# This is a lidar digital elevation model (DEM) from the West Bijou Creek
# escarpment on the Colorado High Plains, coarsened to 5 m grid resolution.
#
# Note: it is convenient to use local grid coordinates rather than UTM
# coordinates, which are what the DEM provides. Therefore, after reading
# topography data into a grid called `demgrid`, which uses UTM coordinates,
# we copy over the elevation data into a second grid (`grid`) of the same
# dimensions that uses local coordinates (i.e., the lower left corner is
# (0, 0)).

# Read topography into a grid
(demgrid, demelev) = read_esri_ascii(
    'bijou_gully_subset_5m_edit_dx_filled.asc',
    name='topographic__elevation'
)

# Create Landlab model grid and assign the DEM elevations to it,
# then display the terrain.
# (note: DEM horizontal and vertical units are meters)
grid = RasterModelGrid(
    (demgrid.number_of_node_rows, demgrid.number_of_node_columns),
    xy_spacing=5.0
)
elev = grid.add_zeros('topographic__elevation', at='node')
elev[:] = demelev
imshow_grid(grid, elev, colorbar_label='Elevation (m)')

# ### Simulate a heavy 5-minute storm
#
# The next bits of code use the `SoilInfiltrationGreenAmpt` and
# `KinwaveImplicitOverlandFlow` components to model infiltration and runoff
# during a 5-minute, 90 mm/hr storm.

# +
# Create and initialize required input fields for infiltration
# component: depth of surface water, and depth (water volume per
# area) of infiltrated water.
depth = grid.add_zeros('surface_water__depth', at='node')
infilt = grid.add_zeros('soil_water_infiltration__depth', at='node')
# must start > 0 to avoid division by zero in the Green-Ampt rate (see Theory)
infilt[:] = 1.0e-4  # small amount infiltrated (0.1 mm)

# Instantiate an infiltration component (all-default parameters)
ga = SoilInfiltrationGreenAmpt(
    grid,
)

# Instantiate an overland flow component
kw = KinwaveImplicitOverlandFlow(
    grid,
    runoff_rate=90.0,
    roughness=0.1,
    depth_exp=5./3.
)

# +
# Set time step and storm duration
dt = 10.0  # time step, sec
storm_duration = 300.0  # storm duration, sec
report_every = 60.0  # report progress this often
nsteps = int(storm_duration / dt)
next_report = report_every
# -

# Run the 5-minute storm of heavy rain, alternating flow routing and
# infiltration each time step
for i in range(nsteps):
    kw.run_one_step(dt)
    ga.run_one_step(dt)
    if ((i+1) * dt) >= next_report:
        print('Time =', (i+1)*dt, 'sec')
        next_report += report_every

# ### Plot the cumulative infiltration
#
# The plot below illustrates how the convergence of water in the branches of
# the gully network leads to greater infiltration, with less infiltration on
# steeper slopes and higher points in the landscape.

# convert m to mm for display
imshow_grid(
    grid,
    1000.0 * infilt,
    colorbar_label='Infiltration depth (mm)',
    cmap='GnBu'
)

# ## Optional parameters
#
# The `SoilInfiltrationGreenAmpt` component provides a variety of parameters
# that can be set by the user. A list and description of these can be found
# in the component's `__init__` docstring, which is printed below:

print(SoilInfiltrationGreenAmpt.__init__.__doc__)

# ## References
#
# <NAME>., & <NAME>. (1911). Studies on Soil Phyics. The Journal of
# Agricultural Science, 4(1), 1-24.
#
# <NAME>., <NAME>., and <NAME>. (1995) Raster-based hydrologic modeling of
# spatially-varied surface runoff, J. Am. Water Resour. As., 31, 523–536,
# doi:10.1111/j.17521688.1995.tb04039.x.
#
# <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2016) Model simulations
# of flood and debris flow timing in steep catchments after wildfire, Water
# Resour. Res., 52, 6041–6061, doi:10.1002/2015WR018176.
notebooks/tutorials/overland_flow/soil_infiltration_green_ampt/infilt_green_ampt_with_overland_flow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compute image embeddings for the training set with a trained center-loss
# siamese network, then build matched/unmatched embedding pairs for the
# held-out pictures.

# +
from backbones import Vgg16, resnet50, siamese
from data import rgb2ycbcr, ImageDataLabelGenerator
from models import CenterLossNet
from tensorflow import keras
import numpy as np
from tqdm import tqdm_notebook
from PIL import Image
import pickle
import os

# +
# Hyperparameters / input geometry (H x W x C input images, nb_classes labels).
weight_decay = 5e-4
H, W, C = (150, 300, 3)
nb_classes = 5004
lambda_c = 0.2
lr = 6e-4
feature_size = 512
final_active = 'sigmoid' # for siamese net

# +
# Augmentation/normalization pipeline; only `standardize` is used below, so
# the geometric augmentations configured here do not affect the embeddings.
train_data_gen = keras.preprocessing.image.ImageDataGenerator(
    samplewise_center=True,
    samplewise_std_normalization=True,
    zca_whitening=False,
    zca_epsilon=1e-6,
    rotation_range=16,
    width_shift_range=0.2,
    height_shift_range=0.1,
    zoom_range=0.2,
    fill_mode='reflect',
    horizontal_flip=True,
    vertical_flip=False,
    preprocessing_function=rgb2ycbcr,
    rescale=1. / 255,
    validation_split=0.1)

# Load the trained CenterLossNet weights and keep only the embedding sub-model.
model = CenterLossNet(siamese, "./trainSpace/", "CenterLossNet").create_model(
    _compile=True,
    use_weightnorm=False,
    database_init=False,
    load_weights=True,
    weights_path="./trainSpace/weights/CenterLossNet.h5",
    lambda_c=lambda_c).get_embedding()

# +
# p2l: picture filename -> label; l2ps: label -> pictures with that label
# (presumably -- TODO confirm against the pickle producers).
with open("../Dataset/metadata/p2l.pickle", "rb") as f:
    p2l = pickle.load(f)
with open("../Dataset/metadata/tr_l2ps.pickle", "rb") as f:
    l2ps = pickle.load(f)


def cal_dis(pa, pb):
    # Euclidean distance between the embeddings of two pictures.
    # NOTE(review): relies on the module-level `ps` and `embeds` built below;
    # the list.index() lookups make this O(n) per call.
    a = ps.index(pa)
    b = ps.index(pb)
    dis = np.sqrt(np.sum(np.square(embeds[a]-embeds[b])))
    return dis


# -

ps = [p for p in p2l.keys()]
print(len(ps))

# Embed every training image, batching predict calls.
embeds = []
batch_size = 1000
batch_x = []
for i, p in tqdm_notebook(enumerate(ps)):
    img = keras.preprocessing.image.load_img(
        os.path.join("../Dataset/train", p),
        color_mode='rgb',
        target_size=(H,W),
        interpolation="bicubic")
    x = keras.preprocessing.image.img_to_array(
        img, data_format="channels_last")
    if hasattr(img, 'close'):
        img.close()
    x = train_data_gen.standardize(x)
    batch_x.append(x)
    # NOTE(review): flushing when i % batch_size == 0 makes the very first
    # "batch" a single image, and if the last index is a flush boundary the
    # trailing predict below runs on an empty list -- verify len(ps).
    if i%batch_size == 0:
        embeds.append(model.predict(np.array(batch_x)))
        batch_x = []
embeds.append(model.predict(np.array(batch_x)))
embeds = np.concatenate(embeds, axis=0)
print(embeds.shape)

# First 15000 pictures for training, the rest held out as the test split.
#train_embeds = embeds[:15000]
train_ps = ps[:15000]
#test_embeds = embeds[15000:]
test_ps = ps[15000:]

# Persist the labels of the test pictures, in test_ps order.
with open("../Dataset/metadata/test_label.pickle", 'wb') as f:
    label = []
    for p in test_ps:
        label.append(p2l[p])
    pickle.dump(label, f)

# +
# match: picture -> all pictures sharing its label (same-identity positives).
match = {}
for p, l in tqdm_notebook(p2l.items()):
    if l in l2ps.keys():
        match[p] = l2ps[l]

# unmatch: picture -> its 10 nearest neighbours (embedding distance) among
# pictures with a DIFFERENT label, i.e. hard negatives. O(n^2) scan.
unmatch = {}
for p, l in tqdm_notebook(p2l.items()):
    dis_p = []
    for p1, l1 in p2l.items():
        if l1 != l:
            dis_p.append((p1, cal_dis(p, p1)))
    dis_p = sorted(dis_p, key=lambda x:x[1], reverse=False)
    dis_p = dis_p[:10]
    ps1, _ = zip(*dis_p)
    unmatch[p] = ps1
# -

with open("../Dataset/metadata/unmatch.pickle", 'wb') as f:
    pickle.dump(unmatch, f)
print(len(unmatch))

# Rebuild the evaluation pair set from the pickled artifacts.
# NOTE(review): match.pickle and test_embed.pickle are loaded here but never
# written in this notebook -- presumably produced by an earlier run; confirm.
import pickle
import numpy as np
from tqdm import tqdm_notebook

with open("../Dataset/metadata/match.pickle", 'rb') as f:
    match = pickle.load(f)
with open("../Dataset/metadata/unmatch.pickle", 'rb') as f:
    unmatch = pickle.load(f)
with open("../Dataset/metadata/test_embed.pickle", 'rb') as f:
    test_embed = pickle.load(f)
with open("../Dataset/metadata/test_label.pickle", 'rb') as f:
    test_label = pickle.load(f)

# For each test picture emit one matched and one unmatched embedding pair
# (falling back to the picture itself / a random picture when no candidate
# from the precomputed lists is inside the test split).
data_pairs = []
label_pairs = []
for i,p in tqdm_notebook(enumerate(test_ps)):
    match_p = p
    unmatch_p = np.random.choice(test_ps)
    if p in match.keys():
        match_ps = match[p]
        for mp in match_ps:
            if mp in test_ps:
                match_p = mp
                break
    unmatch_ps = unmatch[p]
    for ump in unmatch_ps:
        if ump in test_ps:
            unmatch_p = ump
            break
    embed = test_embed[test_ps.index(p)]
    match_embed = test_embed[test_ps.index(match_p)]
    unmatch_embed = test_embed[test_ps.index(unmatch_p)]
    data_pairs.append([embed, match_embed])
    data_pairs.append([embed, unmatch_embed])
    label_pairs.append([test_label[test_ps.index(p)], test_label[test_ps.index(match_p)]])
    label_pairs.append([test_label[test_ps.index(p)],test_label[test_ps.index(unmatch_p)]])
print(len(data_pairs))

with open("../Dataset/metadata/test_label_pairs.pickle", 'wb') as f:
    pickle.dump(label_pairs, f)
code2.0/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## ipcoal: simulation and analysis of coalescent genealogies
#
# The **ipcoal** Python package provides a simple framework for simulating and
# analyzing genealogies and inferred gene trees under complex demographic
# scenarios. You can generate demographic models representing population
# histories, species trees, or networks from a newick file and easily
# visualize the model in **toytree**.
#
# Model parameters are parsed by **ipcoal** to define a simulation framework
# in **msprime** that will be used to generate a distribution of genealogies
# from which SNPs, loci, or chromosomes can be simulated under general time
# reversible substitution models (i.e., like with seq-gen).
#
# The simulated sequence data can be saved to disk in a variety of formats,
# or, gene tree analyses can be automated to infer empirical gene trees on
# the simulated sequence data. The resulting true genealogies, summary
# statistics, and inferred trees are returned by **ipcoal** as a Pandas
# DataFrame for further statistical analysis.

# ### Required software
# All required software can be installed with the following conda command.

# +
# conda install ipcoal -c eaton-lab conda-forge
# -

import toytree
import ipcoal

# ### The main functions of *ipcoal*
# Start by initializing a `Model` class object by providing a species
# tree/network and additional optional model parameters (e.g., Ne, migration,
# mutation rate, recombination rate). Then you can simulate either loci or
# SNPs on the genealogies produced under this model. **ipcoal** makes it easy
# to either write the sequence data to files under a variety of formats, or
# to perform phylogenetic inference on the sequence data directly. You can
# then compare true simulated genealogies to the inferred trees. Each of
# these functions is further demonstrated below.

# +
# init a model Class object for simulations
tree = toytree.rtree.unittree(8, treeheight=1e6)
model = ipcoal.Model(tree=tree, Ne=1e6, seed=12345)

# simulate N unlinked SNPs (will run until N snps are produced)
model.sim_snps(100)

# simulate N loci of len L
model.sim_loci(10, 300)

# view the genealogies and stats in a table
model.df

# save table to a CSV file
model.df.to_csv("./tree_table.csv")

# view sequence data as an array
model.seqs

# write loci as separate phylip files to a directory
model.write_loci_to_phylip(outdir="./tests")

# write concatenated loci or snps to a single phylip file
model.write_concat_to_phylip(outdir="./tests", name="test.phy")

# infer a tree for every locus
model.infer_gene_trees(inference_method='raxml')
# -

model.df.head(20)

# ### Define a species/population tree
# Node heights should be in units of generations.

# +
# generate an imbalanced 4-tip tree with root height of 500K generations
tree = toytree.rtree.imbtree(4, treeheight=5e5)

# draw tree showing idx labels
tree.draw(tree_style='p', tip_labels=True);
# -

# ### Define an ipcoal simulation model.
# Here you can define the demographic model by setting a global Ne value
# (overrides Ne values stored to the tree), and setting the mutation and
# recombination rates. You can define a admixture scenarios using a simple
# syntax provided by a list of tuples. In each tuple you list the (source,
# dest, edge_prop, rate), where edge_prop is a float value of the proportion
# of the length of the shared edge between two taxa from recent to the past
# at which the migration pulse took place. In other words, if you set this to
# (7, 4, 0.5, 0.1) then 10% of the population of 7 will migrate into
# population 4 (backwards in time) at the midpoint of the shared edge between
# them.

model = ipcoal.Model(
    tree,
    Ne=1e6,
    mut=1e-8,
    recomb=1e-9,
    seed=123,
    #admixture_edges=[(6, 4, 0.5, 0.1)],
)

model.sim_snps(10000);

# site-pattern count matrices for each quartet of tips
mats = ipcoal.utils.get_snps_count_matrix(tree, model.seqs)

# ABBA-BABA test on tips (0, 1, 2, 3)
ipcoal.utils.calculate_dstat(model.seqs, 0, 1, 2, 3)

import toyplot
toyplot.matrix(mats[0], margin=0, width=300);

import simcat
simcat.plot.draw_count_matrix(width=700, height=700);

model.seqs[[0, 1, 2, 3], :]

# +
# Build the quartet site-count matrices by hand with ipcoal's jitted helper.
import itertools
import numpy as np
from ipcoal.jitted import count_matrix_int

# get matrix to fill
nquarts = sum(1 for i in itertools.combinations(range(tree.ntips), 4))
counts = np.zeros((nquarts, 16, 16), dtype=np.int64)

# iter
qiter = itertools.combinations(range(tree.ntips), 4)
quartidx = 0
for currquart in qiter:
    # cols indices match tip labels b/c we named tips node.idx
    quartsnps = model.seqs[currquart, :]
    # get counts
    counts[quartidx] = count_matrix_int(quartsnps.T)
    quartidx += 1
# -

import toyplot
toyplot.matrix(counts[0], margin=0, width=350, height=350);

# ### Simulate genealogies and sequences for N independent loci of length L
# Because our simulation includes recombination each locus may represent
# multiple genealogical histories. You can see this in the dataframe below
# where loc 0 is represented by 5 genealogies.

# run the simulation
model.sim_loci(nloci=10, nsites=500)

# view the genealogies and their summary stats
model.df.head(10)

# ### Visualize genealogical variation using toytree
# Here the genealogies are plotted with tips in the same order as in the
# species tree so that you can easily identify the discordance between
# genealogies and the species tree.

# +
# load a multitree object from first 5 genealogies
mtre = toytree.mtree(model.df.genealogy)

# draw trees from the first locus
# with 'shared_axis' to show diff in heights
# with 'fixed_order' to show diff in topology (relative to first tree)
mtre.draw_tree_grid(
    start=0,
    ncols=4, nrows=1,
    shared_axis=True,
    fixed_order=tree.get_tip_labels(),
    tree_style='c',
    node_labels=False,
    node_sizes=8,
    tip_labels=True,
);

# draw trees from the second locus
mtre.draw_tree_grid(
    start=6,
    ncols=4, nrows=1,
    shared_axis=True,
    fixed_order=tree.get_tip_labels(),
    tree_style='c',
    node_labels=False,
    node_sizes=8,
    tip_labels=True,
);
# -

# ### Write the simulated sequence data to file

# view the sequence array for the first locus (showing first 20 bp)
model.seqs[0, :, :20]

# write all loci as separate phylip files to a directory
model.write_loci_to_phylip()

# write all loci concatenated to a single sequence file
model.write_concat_to_phylip()

# ### Simulate N unlinked SNPs
#
# In some cases you may only be interested in sampling unlinked SNPs. This is
# easy to do in **ipcoal** using the `.sim_snps()` function. This has two
# modes, the default is to simulate genealogies and attempt to drop a
# mutation on each one given the mutation rate. It will continue to generate
# new genealogies until you get the requested number of SNPs (which could
# take forever in some instances, like mutation_rate=0). The other option is
# to turn on the `repeat_on_trees=True` flag, which will continue to try to
# simulate a SNP on each tree until it is successful before moving on to the
# next tree. This may be slightly faster but will likely introduce biases.
# Only use the latter mode out of curiosity.
#
# When simulating SNPs the dataframe in `.df` is not particularly
# interesting, since every genealogy corresponds to only 1 site and 1 SNP.
# But it is still of interest for testing methods that rely on SNP data as a
# summary of the genealogy. The sequence data in `.seqs` is now a 2-d array
# (ntaxa, nsnps) as opposed to 3-d (nloci, ntaxa, nsites) when simulating
# loci. The functions to write the data to files works the same as before.
# You can call `.write_seqs_to_phy()` to write.

# simulate N unlinked SNPs
model.sim_snps(100)

# the genealogies for each SNP are stored in .df
model.df.head()

# the snp array is stored in .seqs
model.seqs[:, :20]

# write the snps array as a phylip file
model.write_concat_to_phylip()

# ### Infer gene trees
# Writing the sequence data to disk is optional and actually not required for
# some types of analyses, since *ipcoal* has built-in inference tools for
# inferring gene trees from the sequence data while it is stored in memory.
# This can create a really simple and reproducible workflow based simply on
# the random seed used for your analysis without a need to upload your
# simulated files to DRYAD at the end of your project.
#
# When you call one of the *inference* methods it will fill a new column in
# your dataframe called **inferred_trees**.

# simulate locus data
tree = toytree.rtree.unittree(8, treeheight=1e6)
model = ipcoal.Model(tree=tree, Ne=1e5)
model.sim_loci(10, 500)
model.infer_gene_trees(inference_method="raxml")
model.df

# save the dataframe with the inferred trees
model.df.to_csv("./tree_table.csv")

# ### Write data as a site count matrix (*sensu* SVDquartets)

# +
# for idx, mat in enumerate(snps.reshape((5,16,16))):
#     toyplot.matrix(mat, label="Matrix " + str(idx), colorshow=True);
notebooks/ipcoal-demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (kcd)
#     language: python
#     name: kcd
# ---

# ## Kernel Conditional Discrepancy Test: a replication
# Replicates the simulation analysis from Park et al. 2021

# + tags=[]
import numpy as np
import matplotlib.pyplot as plt

from sparse_shift import KCD

import warnings
warnings.filterwarnings("ignore")
# -

# ## Simulated data setup


def sample_toy_data(n):
    """Draw ``n`` samples from the two-environment toy model of Park et al. 2021.

    Returns
    -------
    X : ndarray of shape (n, 1)
        Covariates, uniform on [0, 1].
    y_outcomes : ndarray of shape (n, 2)
        Column 0 (Y0) and column 1 (Y1), the two potential outcomes. Both share
        the same heteroscedastic noise (unit std below X=0.3, growing linearly
        above it) but differ in conditional mean:
        E[Y0|X] = 3 + 5X versus E[Y1|X] = 4X.
    """
    X = np.random.uniform(0, 1, (n, 1))
    X_below = (X < 0.3)[:, 0]
    X_above = (X >= 0.3)[:, 0]
    y0 = 3 + 5 * X[:, 0] + X_below * np.random.normal(0, 1, (n)) + X_above * np.random.normal(
        0, np.abs((1 + 7 * (X[:, 0] - 0.3))), (n))
    y1 = 4 * X[:, 0] + X_below * np.random.normal(0, 1, (n)) + X_above * np.random.normal(
        0, np.abs((1 + 7 * (X[:, 0] - 0.3))), (n))
    return X, np.vstack((y0, y1)).T


# ## Hypothesis Tests under the null and alternative

# + tags=[]
n = 100            # samples per replication
n_reps = 100       # 1000  (permutation replicates per test; the paper used 1000)
n_power_reps = 20  # number of independent replications of each test
# BUGFIX: `fast_pvalue` was passed to KCD.test() below but was never defined,
# so this cell raised a NameError. False = permutation p-values (consistent
# with `reps` above); set True for the faster approximate p-value if this
# KCD version supports it.
fast_pvalue = False

stats = {'Y0': [], 'Y1': [], 'Y01': []}
pvalues = {'Y0': [], 'Y1': [], 'Y01': []}

# Pvalues
for i in range(n_power_reps):
    print(i)
    np.random.seed(i)
    X, y_outcomes = sample_toy_data(n)
    # Random environment label z; observed outcome is y = Y_z per sample.
    z = np.random.choice(2, (n))
    y = np.asarray([outcomes[i] for outcomes, i in zip(y_outcomes, z)])
    kcd01 = KCD(n_jobs=-2)
    kcd0 = KCD(n_jobs=-2)
    kcd1 = KCD(n_jobs=-2)
    # Alternative: P(Y0|X) and P(Y1|X) differ, so this test should reject.
    stat, pvalue = kcd01.test(X, y, z, reps=n_reps, fast_pvalue=fast_pvalue)
    stats['Y01'].append(stat)
    pvalues['Y01'].append(pvalue)
    # Nulls: the same conditional in both environments; p-values ~ Uniform(0, 1).
    stat, pvalue = kcd0.test(X, y_outcomes[:, 0], z, reps=n_reps, fast_pvalue=fast_pvalue)
    stats['Y0'].append(stat)
    pvalues['Y0'].append(pvalue)
    stat, pvalue = kcd1.test(X, y_outcomes[:, 1], z, reps=n_reps, fast_pvalue=fast_pvalue)
    stats['Y1'].append(stat)
    pvalues['Y1'].append(pvalue)
# -

# Histogram of p-values for each hypothesis (should be uniform under the null).
fig, axes = plt.subplots(1, 3, figsize=(10, 3), sharey=True)
for i, (key, val) in enumerate(pvalues.items()):
    ax = axes[i]
    n = len(val)
    entries, edges, _ = ax.hist(
        val,
        bins=np.arange(0, 1.1, 0.1),
        weights=np.ones(n) / n,
        color="b",
    )
    # entries = height of each column = proportion in that bin
    # calculate bin centers
    bin_centers = 0.5 * (edges[:-1] + edges[1:])
    # dashed reference line at the height expected for a uniform distribution
    ax.axhline(y=sum(entries) / len(bin_centers), ls="--", c="#333333")
    # errorbars are binomial proportion confidence intervals
    ax.errorbar(
        bin_centers,
        entries,
        yerr=1.96 * np.sqrt(entries * (1 - entries) / n),
        fmt=".",
        c="#333333",
    )
    ax.set_title(f"Test {key} pvalues")
    # ax.set_xlim(0,1)
    ax.set_xticks([0, 1])
    ax.set_yticks([0, 0.1, 1])

# ## Application of the witness function

# +
np.random.seed(100)
n = 1000
X, y_outcomes = sample_toy_data(n)
z = np.random.choice(2, (n))
y = np.asarray([outcomes[i] for outcomes, i in zip(y_outcomes, z)])

# Witness function evaluated on an (X_lin x Y_lin) grid
X_lin = np.linspace(0, 1, 1000)
Y_lin = np.linspace(-10, 25, 1000)
kcd = KCD(reg=1.0, n_jobs=10)
witness_mat = kcd.witness(X, y, z, X_lin, Y_lin)
# -

# ## Results

# +
fig, axes = plt.subplots(1, 3, figsize=(15, 3))

# Raw data and mean trend lines
ax = axes[0]
ax.set_ylabel('Y')
ax.set_xlabel('X')
ax.set_title('(a) Data')
ax.scatter(X[:, 0], y_outcomes[:, 0], label=r'$Y_0$', c='blue', marker='x', alpha=0.2)
ax.plot(
    np.linspace(0, 1, 10),
    3 + 5 * np.linspace(0, 1, 10),
    label=r'$E[Y_0|X]$', c='darkblue', linewidth=2, ls='-')
ax.scatter(X[:, 0], y_outcomes[:, 1], label=r'$Y_1$', c='orange', marker='o', alpha=0.2)
ax.plot(
    np.linspace(0, 1, 10),
    4 * np.linspace(0, 1, 10),
    label=r'$E[Y_1|X]$', c='orangered', linewidth=2, ls='-')
ax.legend(loc='upper left')

# Hypothesis test results: rejection rate at level alpha for each hypothesis
ax = axes[1]
ax.set_ylabel('Proportion of tests rejected')
ax.set_xlabel('Hypothesis')
ax.set_title('(b) Hypothesis Test')
alpha = 0.05
ax.bar(
    [1, 2, 3],
    [np.mean(np.asarray(pvalues['Y0']) <= alpha),
     np.mean(np.asarray(pvalues['Y1']) <= alpha),
     np.mean(np.asarray(pvalues['Y01']) <= alpha)],
    align='center')
ax.set_xticks([1, 2, 3])
ax.set_xticklabels([
    r'$P_{Y_0|X}$ vs $P_{Y_0|X}$',
    r'$P_{Y_1|X}$ vs $P_{Y_1|X}$',
    r'$P_{Y_0|X}$ vs $P_{Y_1|X}$'])

# Witness function
ax = axes[2]
ax.set_ylabel('Y') ax.set_xlabel('X') ax.set_title('(c) Conditional Witness Function') cm = ax.pcolormesh(X_lin, Y_lin, witness_mat, cmap='magma') fig.colorbar(cm, ax=ax) plt.show() # -
notebooks/01-kcd_paper_simulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Titanic Dataset: Exploratory Data Analysis # + import pandas as pd import numpy as np from pandas import Series,DataFrame import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # - #The titanic data is available through Kaggle, after sign-up. titanic_df = pd.read_csv('titan_train.csv') #Looking at the first few rows in the dataset. titanic_df.head() #Information about the dataset. titanic_df.info() titanic_df.describe() # ### Who were the passengers on the Titanic? #Creating a factorplot, charting the number of male and female passengers sns.factorplot('Sex',data=titanic_df,kind='count') sns.factorplot('Pclass',data=titanic_df,hue='Sex',kind='count') # + #Function to detect if a person is a man, woman or child. def man_wom_chi(passenger): age=passenger['Age'] sex=passenger['Sex'] return 'child' if age < 16 else sex #Using Pandas' apply method to create a new column "Person" titanic_df['Person'] = titanic_df.apply(man_wom_chi,axis=1) # - #Looking at the first 10 rows in the dataset, to see if our method worked. titanic_df[0:10] #Getting the actual counts print titanic_df['Person'].value_counts() sns.factorplot('Pclass',data=titanic_df,hue='Person',kind='count') titanic_df['Age'].hist() # + fig = sns.FacetGrid(titanic_df,hue='Pclass',aspect=4) fig.map(sns.kdeplot,'Age',shade=True) oldest = titanic_df['Age'].max() fig.set(xlim=(0,oldest)) fig.add_legend() # + fig = sns.FacetGrid(titanic_df,hue='Sex',aspect=4) fig.map(sns.kdeplot,'Age',shade=True) oldest = titanic_df['Age'].max() fig.set(xlim=(0,oldest)) fig.add_legend() # - # ### What deck were the passengers on and how does that relate to their class? #Because the cabin data was missing in a lot of cases, we can just drop it for now for this section. 
deck_df = titanic_df.dropna(axis=0) deck_df.head() # + #Grabbing the deck from the cabin numbers def get_level(passenger): cabin = passenger['Cabin'] return cabin[0] # get_level[deck_df.iloc[1]] # - deck_df['level']=deck_df.apply(get_level,axis=1) deck_df.head() sns.factorplot('level',data=deck_df,palette='winter_d',kind='count') sns.factorplot('level',data=deck_df,hue='Pclass',kind='count') # ### Where did the passengers come from? sns.factorplot('Embarked',data=titanic_df,hue='Pclass',x_order=['C','Q','S'],kind='count') # ### Who was with their family? titanic_df.head() #Adding the number of family a passenger had onboard titanic_df['Alone'] = titanic_df.SibSp + titanic_df.Parch titanic_df.tail() # + titanic_df['Alone'].loc[titanic_df['Alone']>0] = 'No' titanic_df['Alone'].loc[titanic_df['Alone']==0] = 'Yes' # - titanic_df.head() sns.factorplot('Alone',data=titanic_df,kind='count') # ### What factors helped someone survive the sinking? sns.factorplot('Pclass','Survived',data=titanic_df) sns.factorplot('Pclass','Survived',hue='Person',data=titanic_df) sns.lmplot('Age','Survived',data=titanic_df) sns.lmplot('Age','Survived',hue='Pclass',data=titanic_df) sns.factorplot('Survived','Alone',data=titanic_df)
Titanic Dataset - Exploratory Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: peacock
#     language: python
#     name: peacock
# ---

# # Elements that can be Xarrays
# A few ideas on how xarray could be used to help make processing efficient.
#
# xarray is built on Pandas, so the most of the functions that you use in Pandas are the same in xarray.
#
# There are two main data types in xarray:
#
# - `xarray.DataArray` which is similar to `pandas.Series`
# - `xarray.DataSet` which is similar to `pandas.DataFrame`
#
# Instead of `index` xarray uses `coords`
#
# Similarly, instead of `columns` xarray uses `dims`, well sort of if no dims are given then the column names are the names of the coords.
#
# ## Data Block
# This would be a continuous time series of N points for a single channel.  The atomic element of the processing code.  The element would have methods like:
# - Apply window
# - Compute FFT/IFFT could use https://xrft.readthedocs.io/en/latest/index.html
# - Detrend/pre-whiten
# - Pick harmonic
#
# Xarray.DataArray →
#  - coords = time
#  - values = data
#  - attrs:
#      - survey.id
#      - station.id
#      - location.latitude
#      - location.longitude
#

# +
import xarray


class Block:
    """
    The atomic unit for processing time series.

    Defined as a continuous time series of N points for a single channel
    """

    def __init__(self, time_index, data):
        # Wrap the raw samples in a DataArray indexed by time.
        self.data = xarray.DataArray(data, coords=[("time", time_index)], name="data")

    @property
    def window_time(self):
        """ return the time of the given window, however this is calculate, so that it can be aligned later"""
        # NOTE(design sketch): intentionally unimplemented -- returns None for now.

    def apply_window(self, window_type, window_parameters):
        """ Apply a certain window type to the data """
        # NOTE(design sketch): `get_window` is not defined on this class yet;
        # calling this raises AttributeError until it is implemented.
        window = self.get_window(window_type, window_parameters)
        return self.data * window

    def compute_fc(self):
        """ Compute the FFT of the given data block, could try to use <https://xrft.readthedocs.io/en/latest/index.html> """
        # NOTE(design sketch): `FFT` is a placeholder name, not an import.
        return FFT(self.data)
# -

# # FC Array
# This would be a time series of Fourier coefficients for N channels for a single decimation level. If the time index is the same between decimation levels, then this could be for M decimation levels.
#
# The coordinates would be a time index that is defined by the start time of the survey and the given window length, overlap, etc. These times should correlate with the `Block.window_time`. That way you could instantiate the FC array at the beginning with zeros or NaNs, then as you calculate the FC for each window for each channel, you can then begin to fill in the FC array with values.
#
# Probably you would have an FC array for a single channel and then collect them into a larger FC array, but they are the same object type.
#
#


class FCArray():
    """
    An intelligent collection of FC coefficients.

    Holds a (time x channel) array of Fourier coefficients that starts out
    all-NaN and is filled in window-by-window as coefficients are computed.
    """

    def __init__(self, time_index, channel_list):
        # BUGFIX: the original referenced an undefined name `data` here, so
        # instantiation raised a NameError. Pre-fill with NaNs (as described
        # in the markdown above) so values can be written in as computed.
        fill = [[float("nan")] * len(channel_list) for _ in range(len(time_index))]
        self.data = xarray.DataArray(
            fill, coords=[("time", time_index), ("channels", channel_list)]
        )
examples/notebooks/xarrays_and_processing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.0 ('mlclass')
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# NOTE(review): absolute local path -- only runs on the original author's machine.
df1 = pd.read_csv(r'C:\Users\Jalpa\Desktop\Modular-1\ML_LinearRegression\DATA\rock_density_xray.csv')

# Drop the exported index column and give the two features readable names.
df = df1.drop('Unnamed: 0', axis = 1)

df.head()

df.columns = ['Signal', 'Density']

df.columns

sns.scatterplot(x = 'Signal', y = 'Density', data = df)

# Linear Regression

X = df['Signal'].values.reshape(-1,1)

y = df['Density']

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.1, random_state = 101)

from sklearn.linear_model import LinearRegression

lr_model = LinearRegression()

lr_model.fit(X_train, y_train)

lr_preds = lr_model.predict(X_test)

lr_preds

from sklearn.metrics import mean_absolute_error, mean_squared_error

mean_absolute_error(y_test, lr_preds)

np.sqrt(mean_squared_error(y_test, lr_preds))

# Visualise the fitted line over signal values 0-100.
signal_range = np.arange(0,100)

signal_preds = lr_model.predict(signal_range.reshape(-1,1))

signal_preds

sns.scatterplot(x = 'Signal', y = 'Density', data =df)
plt.plot(signal_range, signal_preds)

# Polynomial Regression


def run_model(model, X_train, y_train, X_test, y_test):
    """Fit `model`, print test RMSE/MAE, and plot its predictions over signal
    values 0-100 on top of the full scatter (reads the global `df`)."""

    #Fit the model to training dataset
    model.fit(X_train, y_train)

    #performance metric
    preds = model.predict(X_test)

    rmse = np.sqrt(mean_squared_error(y_test, preds))
    mae = mean_absolute_error(y_test, preds)

    print(f'RMSE: {rmse}')
    print(f'MAE: {mae}')

    # Plot the model's predictions across the signal range
    signal_range = np.arange(0,100)
    signal_preds = model.predict(signal_range.reshape(-1,1))

    sns.scatterplot(x = 'Signal', y = 'Density', data =df, color = 'green')
    plt.plot(signal_range, signal_preds)


model = LinearRegression()

# It is not applicable
run_model(model, X_train, y_train, X_test, y_test)

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

pipe = make_pipeline(PolynomialFeatures(degree = 2), LinearRegression())

#It is not performing well
run_model(pipe, X_train, y_train, X_test, y_test )

pipe = make_pipeline(PolynomialFeatures(degree = 6), LinearRegression())

#performing well
run_model(pipe, X_train, y_train, X_test, y_test )

# # KNeighbors Regression

from sklearn.neighbors import KNeighborsRegressor

# +
k_values = [1,5,10,30]

for n in k_values:

    model = KNeighborsRegressor(n_neighbors=n)
    run_model(model, X_train, y_train, X_test, y_test)
    plt.show()
# -

from sklearn.tree import DecisionTreeRegressor

model = DecisionTreeRegressor()

# Not performing well(high variance)
run_model(model, X_train, y_train, X_test, y_test)

from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV

svr = SVR()
# BUGFIX: the original C grid listed 100 twice ([..., 100, 100]); the duplicate
# only repeated an identical fit and is removed here. (1000 may have been the
# intended value -- confirm before widening the search space.)
param_grid = {'C': [0.01, 0.1, 1, 10, 100], 'gamma': ['auto', 'scale']}
grid_model = GridSearchCV(svr, param_grid)

run_model(grid_model, X_train, y_train, X_test, y_test) #Performing well

from sklearn.ensemble import RandomForestRegressor

rf = RandomForestRegressor(n_estimators = 10)

run_model(rf,X_train, y_train, X_test, y_test) #Not performing well

from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor

model = GradientBoostingRegressor()

run_model(model, X_train, y_train, X_test, y_test)

model = AdaBoostRegressor()

run_model(model, X_train, y_train, X_test, y_test)
Random Forest/RF_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 8.0 # language: '' # name: sagemath # --- # 1) <span style="color:red"> Antes que nada, lee cuidadosamente estas instrucciones y las que aparecen en la hoja con la contraseña. </span> # # 2) Cambia el nombre de este archivo sustituyendo "nombre.apellido" por los tuyos, tal como aparecen en tu dirección de correo electrónico de la UAM. # # 3) Este archivo debe quedar en la carpeta "ENTREGA..." que está en el escritorio de tu cuenta de examen. Lo mejor es que esté en esa carpeta desde el comienzo del examen. # # 4) El examen resuelto debe quedar en este único archivo. No se puede usar un archivo para cada pregunta. # # 5) Recuerda que hay que deshabilitar el salvapantallas al comenzar el examen, tal como está indicado en la hoja con la contraseña. # # # # # CALIFICACIÓN: # # COMENTARIOS: # # 1) # ## Ejercicio 1 # # Consideramos el siguiente 'juego': inicialmente hay $n$ jugadores y en cada fase del juego cada jugador vivo elige **al azar** otro jugador vivo, distinto de sí mismo, y lo mata. El juego se repite hasta que queda un único jugador, *el elegido del destino*, o bien ninguno. En principio, es perfectamente posible que el juego se juegue una única vez. # # 1) Estima, con dos cifras decimales 'correctas', la probabilidad de que, partiendo de $n=100$ jugadores, haya un superviviente. Como se indicó en clase, entendemos que que son cifras decimales 'correctas' las que no cambian cuando se incrementa suficientemente el número $N$ de 'casos posibles'. # # 2) ¿Qué probabilidad tengo, si decido jugar con otros $99$ jugadores, de ser yo el *elegido del destino*? Modifica el programa del apartado anterior para estimar la probabilidad y explica el resultado obtenido. # # 3) Ahora queremos estudiar la duración del juego. Para eso debemos calcular *promedios* de la duración. 
Define una función *promedio(n,N)* que calcule el promedio y la desviación estándar de la duración con $n$ jugadores iniciales y $N$ repeticiones del juego. Evalúa *promedio(100,10^5)* y comenta los resultados obtenidos. # # 4) Finalmente, estudia la variación de los promedios al variar $n$ entre $10$ y $200$ saltando de $10$ en $10$. Realiza un gráfico de los resultados y analiza la dependencia funcional (el promedio como función de $n$). En este cuarto apartado probablemente tendrás que **elegir con más cuidado un $N$** (el número de repeticiones del juego utilizadas para calcular cada promedio) **adecuado** dadas las capacidades bastante limitadas de la máquina. # + def matar(vivos): if len(vivos) == 1: return vivos long = len(vivos) muertos = [] for item in vivos: L2 = copy(vivos) L2.remove(item) n = randint(0,long-2) muertos.append(L2[n]) sobreviven = list(set(vivos)-set(muertos)) return sobreviven print matar(srange(100)) # + def probabilidad(n,N): favorables = 0 for muda in xsrange(N): vivos = srange(n) sobreviven = copy(vivos) while len(sobreviven) > 1: sobreviven = matar(sobreviven) if len(sobreviven) == 1: favorables += 1 return (favorables/N).n() # %time L = [probabilidad(100,10^k) for k in srange(3,6)] print L # - # Debemos considerar que las dos primeras cifras decimales, que ya obtenemos con $10^4$ iteraciones son correctas porque se mantienen al pasar a $10^5$ iteraciones. # + def probabilidad2(n,N): favorables = 0 for muda in xsrange(N): vivos = srange(n) sobreviven = copy(vivos) while len(sobreviven) > 1: sobreviven = matar(sobreviven) if len(sobreviven) == 1 and sobreviven == [0]: favorables += 1 return (favorables/N).n() # %time L1 = [probabilidad2(100,10^k) for k in srange(3,6)] print L1 # - # La probabilidad obtenida, del orden de $0.005$ es la centésima parte de la obtenida en el apartado anterior. 
Es lo esperable porque no hay nada en el problema que me distinga de los otros $99$ jugadores, de forma que la probabilidad de que sobreviva alguno se debería repartir equitativamente entre los $100$ jugadores. # + def promedio(n,N): L = [] for muda in xsrange(N): jugadas = 0 vivos = srange(n) sobreviven = copy(vivos) while len(sobreviven) > 1: jugadas += 1 sobreviven = matar(sobreviven) L.append(jugadas) return (sum(L)/N).n(),sage.stats.basic_stats.std(L).n() # %time P = promedio(100,10^5) print P # - # La desviación estándar es bastante baja, y debemos esperar que el número de jugadas sea casi siempre menor a $6$. def promedios(n,N): L = [] for int in srange(10,n,10): L.append((int,promedio(int,N)[0])) return L # %time L2 = promedios(200,10**3) points(L2) # %time L3 = promedios(500,10**3) points(L3) var('A B'); model(x)=A*x^(B) diccionario = find_fit(L3,model,solution_dict=True); print diccionario points(L3)+plot(diccionario[A]*x^(diccionario[B]),x,0,500) var('C D'); model2(x)=C+D*log(x) diccionario2 = find_fit(L3,model2,solution_dict=True); print diccionario2 points(L3)+plot(diccionario2[C]+diccionario2[D]*log(x),x,0,500) # Aunque el primer modelo no es muy malo, parece claro que es mejor el segundo. # ## Ejercicio 2 # # Hemos visto que es posible estimar el área de un disco unidad *lanzando dardos al cuadrado unidad y contando los que caen dentro del disco*. El mismo procedimiento permite, en principio, estimar el volumen de la hiperesfera $\mathbb{B}_n$ de radio $1$ en $\mathbb{R}^n$ (i.e. el conjunto de puntos de coordenadas $(x_1,x_2,\dots,x_n)\in \mathbb{R}^n$ tales que $x_1^2+x_2^2+\dots+x_n^2\le 1$), pero los volúmenes que vamos obteniendo al incrementar $n$ son cada vez más pequeños y bastante pronto obtenemos cero como respuesta. # # Ésto se debe a que, cuando $n$ es grande, muy pocos dardos caen dentro de la hiperesfera debido a que ocupa muy poco volumen con respecto al volumen, $2^n$, del hipercubo unidad $[-1,1]^n$. 
En este ejercicio vemos una manera distinta de *lanzar dardos*, que nos va a asegurar que suficientes caen dentro de la hiperesfera. Denotemos por $V(n)$ el volumen de $\mathbb{B}_n$, que es lo que queremos calcular. # # ### Descripción del método # # 1) El *truco básico* consiste en considerar la hiperesfera de dimensión $n$ dentro del hipercilindro $\mathbb{C}_n:=\mathbb{B}_{n-1}\times [-1,1]$, en lugar de dentro del hipercubo $[-1,1]^n$. El volumen del hipercilindro es, gracias al teorema de Fubini del cálculo integral, igual al volumen de $\mathbb{B}_{n-1}$ multiplicado por dos. # # 2) En segundo lugar observamos que para obtener puntos aleatorios en el hipercilindro $\mathbb{C}_n$ basta generar puntos aleatorios $(x_1,x_2,\dots,x_{n-1})\in \mathbb{B}_{n-1}$ en la hiperesfera de dimensión $n-1$ y para cada uno producir un real aleatorio $x_n$ en el intervalo $[-1,1]$. # # 3) Para que esto funcione debemos encontrar una **manera eficiente** de generar puntos aleatorios en la hiperesfera $\mathbb{B}_{n-1}$, y el método que vamos a usar se llama de *cadena de Markov* (una especie de *paseo aleatorio generalizado*): # # A) El primer punto de la cadena $\mathbf{x}_0$ es, por ejemplo, el origen de coordenadas. # # B) Para cada punto $\mathbf{x}_t=(x_1,x_2,\dots,x_{n-1})\in \mathbb{B}_{n-1}$ obtenemos un nuevo punto $\mathbf{x}_{t+1}$ eligiendo una coordenada al azar, supongamos que hemos obtenido $x_i$, y un real aleatorio $\Delta$ en el intervalo $[-\delta,\delta]$. Entonces, cambiamos $x_i$ por $x_i+\Delta$ en $\mathbf{x}$ y si todavía estamos dentro de la hiperesfera ese es el nuevo punto $\mathbf{x}_{t+1}$ en la cadena. Si al hacer el cambio nos vamos fuera de la hiperesfera dejamos $\mathbf{x}_{t+1}:=\mathbf{x}_{t}$. 
# # C) La cadena $\{\mathbf{x}_0,\mathbf{x}_1,\dots,\mathbf{x}_t,\dots,\mathbf{x}_N\}$ es entonces un conjunto de $N+1$ puntos, todos en la hiperesfera $\mathbb{B}_{n-1}$, y se puede demostrar que, para $N$ suficientemente grande, están uniformemente distribuidos en la hiperesfera. # # 4) Después de estos preparativos podemos ya plantear el cálculo del volumen $V(n)$ de la hiperesfera $\mathbb{B}_n$: # # Generamos un número muy grande $N$ de puntos en la hiperesfera de dimensión $n-1$, usando el apartado 3), y para cada uno de ellos vamos calculando un punto en el hipercilindro, como en el apartado 2). Si el punto obtenido en el hipercilindro cae dentro de la hiperesfera de dimensión $n$, $\mathbb{B}_n$, incrementamos un contador, al que por ejemplo hemos llamado *dentro*. # # La fracción $dentro/N$, calculada después de haber generado $N$ puntos del hipercilindro, es aproximadamente igual al cociente de volúmenes $V(n)/(2V(n-1))$ (volumen de la hiperesfera dividido por volumen del hipercilindro). # # Entonces, podemos reducir el cálculo de $V(n)$ al de $V(n-1)$, el de $V(n-1)$ al de $V(n-2)$, etc., hasta llegar al volumen de la hiperesfera en dimensión $1$ que es igual a dos. # # # # ### Ejercicios # # 2.1) Define las funciones necesarias para implementar esta forma, *Monte Carlo con cadenas de Markov*, de calcular $V(n).$ # # 2.2) El volumen exacto de una hiperesfera se puede calcular mediante integrales, y se obtiene una fórmula que se indica en la celda siguiente: # def Vol_exacto(dim): return (pi**(dim/2.0)/gamma(dim/2.0+1.0)).n() print Vol_exacto(1);print Vol_exacto(2); print Vol_exacto(3); print Vol_exacto(100) # ¿Para qué queremos estimar esos mismos volúmenes mediante Monte Carlo? Los resultados exactos sirven como control de las estimaciones, y, en particular, permiten ajustar de forma óptima los parámetros $N$ y $\delta$. 
# # El parámetro $N$ debe ser suficientemente grande para obtener una precisión aceptable, pero no tan grande que tengamos que esperar una eternidad por el resultado, y el parámetro $\delta$, un real que podemos esperar que esté entre cero y uno, no puede ser muy pequeño, porque entonces todos los puntos de la cadena están muy próximos entre sí y no rellenan bien la hiperesfera, ni muy grande porque entonces la cadena repite muchas veces sus valores. # # Queremos elegir los parámetros, $N$ y $\delta$, para calcular, mediante Monte Carlo, el volumen de la hiperesfera de dimensión $50$ de forma que al menos sea correcta la primera cifra decimal no nula (por supuesto, cuantas más cifras correctas obtengamos mejor). Tomamos $N=10^5$ y elegimos un $\delta$ óptimo, dentro del intervalo $(0,1)$, mediante experimentos adecuados. # # # + def Q_d(dim,N,delta): dentro = 0 L = [0]*(dim-1) r = 0 for j in xsrange(N): k = randint(0,dim-2) x_original = L[k] x_nueva = x_original+2*delta*random()-delta if abs(x_nueva)<1 : radio_nuevo = r+x_nueva**2-x_original**2 if radio_nuevo<1: L[k] = x_nueva r = radio_nuevo t = 2*random()-1 if r+t**2<1: dentro += 1 return (dentro/N).n() # %time Q = Q_d(100,10**5,0.3) print Q # - def vol_d(dim,N,delta): vol = 2 for m in srange(2,dim+1): Q = 2*Q_d(m,N,delta) vol *= Q return vol # %time V1 = vol_d(2,10^5,0.3) print V1 # %time L4=[vol_d(3,10^5,0.1*k)-Vol_exacto(3) for k in srange(1,9)] print L4 # %time L5 = [vol_d(10,10^5,0.1*k)-Vol_exacto(10) for k in srange(1,9)] print L5 # Parece que el delta óptimo puede ser $\delta=0.1$ o quizá $\delta=0.5$, pero hagamos una prueba más: # %time L6 = [(abs(2*Q_d(50,10^5,0.1*k)-(Vol_exacto(50)/Vol_exacto(49))),k) for k in srange(1,10)] L6.sort() print L6 # %time L7 = [(abs(2*Q_d(50,10^6,0.1*k)-(Vol_exacto(50)/Vol_exacto(49))),k) for k in srange(1,10)] L7.sort() print L7 # En los dos casos hemos obtenido $\delta=0.8$ como óptimo, pero para asegurarnos deberíamos repetir estos cálculos más veces. 
Probamos a calcular el volumen usando $N=10^5$ y $\delta=0.8$. # %time V2 = vol_d(50,10^5,0.8) print V2 Vol_exacto(50)
2_Curso/Laboratorio/SAGE-noteb/IPYNB/SOLUCIONES/nombre.apellido-labodt-2017-ex4-SOL.ipynb
# # Get-CKAzADGroups # ## Metadata # # | | | # |:------------------|:---| # | platform | Azure | # | contributors | <NAME> @Cyb3rWard0g,MSTIC R&D | # | creation date | 2021-08-22 | # | modification date | 2021-09-08 | # | Tactics | [TA0007](https://attack.mitre.org/tactics/TA0007) | # | Techniques | [T1069.003](https://attack.mitre.org/techniques/T1069/003) | # ## Description # A threat actor might want to list all the groups in an organization, including but not limited to Microsoft 365 groups. # # ## Run Simulation # ### Get OAuth Access Token # + from msal import PublicClientApplication import requests import time function_app_url = "https://FUNCTION_APP_NAME.azurewebsites.net" tenant_id = "TENANT_ID" public_client_app_id = "KATANA_CLIENT_APP_ID" server_app_id_uri = "api://" + tenant_id + "/cloudkatana" scope = server_app_id_uri + "/user_impersonation" app = PublicClientApplication( public_client_app_id, authority="https://login.microsoftonline.com/" + tenant_id ) result = app.acquire_token_interactive(scopes=[scope]) bearer_token = result['access_token'] # - # ### Set Azure Function Orchestrator endpoint = function_app_url + "/api/orchestrators/Orchestrator" # ### Prepare HTTP Body data = [{'activityFunction': 'Azure', 'type': 'action', 'action': 'Get-CKAzADGroups', 'parameters': {'selectFields': 'ENTER-VALUE', 'filter': 'ENTER-VALUE', 'pageSize': 'ENTER-VALUE'}}] # ### Send HTTP Request # + http_headers = {'Authorization': 'Bearer ' + bearer_token, 'Accept': 'application/json','Content-Type': 'application/json'} results = requests.get(endpoint, json=data, headers=http_headers, stream=False).json() time.sleep(5) # - # ### Explore Output query_status = requests.get(results['statusQueryGetUri'], headers=http_headers, stream=False).json() query_results = query_status['output'] query_results
docs/notebooks/azure/discovery/Get-CKAzADGroups.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Market Data # GS Quant allows for easy access to market data # + from gs_quant.data import Dataset from datetime import date, timedelta from gs_quant.session import GsSession client_id = None client_secret = None GsSession.use(client_id=client_id, client_secret=client_secret, scopes=[ 'run_analytics', 'read_financial_data', 'read_product_data']) # - # Get IRSWAP Data ds_rate = Dataset('SWAPRATES_STANDARD') # Get a swap rate data set ds_rate.get_coverage() # Lets see what's available start_date = date(2019, 1, 1) end_date = date.today() - timedelta(days=1) data = ds_rate.get_data(start_date, end_date, assetId='MABXTJXXN8WJR7R8', tenor='10y') print(data.head()) ds_rate_vol = Dataset('SWAPTIONVOL_STANDARD') # Get a swaption vol data set ds_rate_vol.get_coverage() # Lets see what's available data = ds_rate_vol.get_data(start_date, end_date, assetId='MAFYB8Z4R1377A19') print(list(data)) print(data.head())
gs_quant/tutorials/4_market_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Wrangling data summative

# +
# Library imports
import bs4 as bs
import pprint as pp
import pandas as pd
import re
import mwparserfromhell as mwp
import tldextract
import urllib.request

#### Part 1: Get the data's
# Notes:
# - Use page id in case countries change name (Czechia)
# - (Nuisance of the XML tag called text)
# - Keep two years separate
# - keep "/n" returns in -- because


def makeDF(date):
    """Read the Wikipedia XML dump for one snapshot date and return a DataFrame.

    The file "WD_wikipediaCountries_<date>.xml" is parsed with BeautifulSoup;
    for every <page> element the country name (<title>) and the article body
    (<text>) are collected.  The page <id> is used as the DataFrame index so
    that the two yearly snapshots can be joined even if a country was renamed.

    Parameters
    ----------
    date : str
        Snapshot date in DDMMYYYY form; date[4:] (the year) names the text column.

    Returns
    -------
    pandas.DataFrame with columns "country" and "text<year>".
    """
    wikitext = open("WD_wikipediaCountries_{}.xml".format(date), "rb").read().decode("utf-8")
    wikisoup = bs.BeautifulSoup(wikitext, "lxml")
    rows = []
    for c, i in enumerate(wikisoup.findAll("page")):
        newdic = {}
        newdic["country"] = i.title.text
        newdic["text{}".format(date[4:])] = i.find("text").text
        temp_df = pd.DataFrame([newdic.values()], index=[i.id.text], columns=newdic.keys())
        rows.append(temp_df)
    wiki_df = pd.concat(rows, axis=0)
    return wiki_df


df_2009 = makeDF("01012009")
df_2019 = makeDF("07012019")
# Join the two snapshots on page id (the index)
mergedFrame = df_2009.merge(df_2019, left_index=True, right_index=True)

# Check nothing has gone wrong - that both columns are aligned
display(mergedFrame[mergedFrame["country_x"] != mergedFrame["country_y"]])

# delete "country_y" and rename "country_x" to country
mergedFrame = mergedFrame.drop("country_y", axis=1)
mergedFrame.columns = ["country", "text2009", "text2019"]

bothyears = ["2009", "2019"]

#### Part 2 - cleaning
### Part 2.1 - Extracting links


def what_links(dirty_text, year, istest=False):
    """Count external links in one article, split into US-gov vs other TLDs.

    When istest is True the links are extracted with a plain regex (used only
    to validate the parser-based extraction); otherwise mwparserfromhell is
    used.  A link counts as "US" when its public suffix is gov/edu/mil.

    Returns a dict with keys "us_count<year>", "other_count<year>",
    "us_suffixes<year>" and "other_suffixes<year>" (the last two are sets).
    """
    ustlds = ["gov", "edu", "mil"]
    if istest == True:
        # Validating the external link extraction
        ext_link_regex = re.compile(r'https?://[\w\./?&=%]*')
        ext_links = ext_link_regex.findall(dirty_text)
    else:
        wikipage = mwp.parse(dirty_text, skip_style_tags=True)
        ext_links = wikipage.filter_external_links()
    counts = {"us_count{}".format(year): 0, "other_count{}".format(year): 0}
    us_suffixes = []
    other_suffixes = []
    for link in ext_links:
        if istest == True:
            url = link
        else:
            # wiki external links look like "[http://url display text]"
            url = link.split(" ", 1)[0]
            url = url.replace("[", "")
            url = url.replace("]", "")
        suffix = tldextract.extract(url).suffix
        if suffix in ustlds:
            counts["us_count{}".format(year)] += 1
            us_suffixes.append(suffix)
        else:
            counts["other_count{}".format(year)] += 1
            other_suffixes.append(suffix)
    counts["us_suffixes{}".format(year)] = set(us_suffixes)
    counts["other_suffixes{}".format(year)] = set(other_suffixes)
    return counts


def getLinks(df, years=bothyears, istest=False):
    """Apply what_links to each article column and unpack the result dicts
    into separate "links_*" columns (suffixed with "test" when istest=True).

    Mutates and returns df.
    """
    if istest == True:
        test = "test"
    else:
        test = ""
    for year in years:
        df["what_links{}{}".format(year, test)] = df.apply(
            lambda x: what_links(x["text{}".format(year)], year, istest=istest), axis=1)
        # Unpack the columns
        link_values = {"links_us{}{}".format(year, test): 'us_count{}'.format(year),
                       "links_other{}{}".format(year, test): 'other_count{}'.format(year),
                       "links_suffix_us{}{}".format(year, test): 'us_suffixes{}'.format(year),
                       "links_suffix_other{}{}".format(year, test): 'other_suffixes{}'.format(year)}
        for col_name, dic_name in link_values.items():
            df[col_name] = df["what_links{}{}".format(year, test)].map(lambda x: x[dic_name])
        del df["what_links{}{}".format(year, test)]
    return df


# Run the function
mergedFrame = getLinks(mergedFrame)

# NOTE(review): the "links_own{year}" columns used below are never created by
# getLinks/what_links (which only produce us/other counts), so these statements
# would raise KeyError as written -- presumably an "own-country TLD" count was
# removed at some point.  Left as in the original pending confirmation of intent.
for year in bothyears:
    mergedFrame["links_total{}".format(year)] = mergedFrame.apply(
        lambda x: x["links_own{}".format(year)] + x["links_us{}".format(year)] + x["links_other{}".format(year)], axis=1)

## Tests on regex vs mwp
mergedFrame = getLinks(mergedFrame, istest=True)
mean_2009_own = (mergedFrame["links_own2009"] - mergedFrame["links_own2009test"]).mean()
mean_2009_us = (mergedFrame["links_us2009"] - mergedFrame["links_us2009test"]).mean()
mean_2019_own = (mergedFrame["links_own2019"] - mergedFrame["links_own2019test"]).mean()
mean_2019_us = (mergedFrame["links_us2019"] - mergedFrame["links_us2019test"]).mean()
print("2009 own: {}".format(mean_2009_own))
print("2009 US: {}".format(mean_2009_us))
print("2019 own: {}".format(mean_2019_own))
print("2019 US: {}".format(mean_2019_us))

# test_cols = [title for title in mergedFrame.columns if "test" in title]
# for title in test_cols:
#     del mergedFrame[title]

### Part 2.2 - Getting article length in number of sentances
# Steps/process:
# - What we want to tidy up is any place where standardised by Wikipedia
# - With wiki parser
#   - Remove internal links which are lists, categories or languages BUT not Images (as contain text)
#   - Remove navigation elements (templates) as same across categories BUT not columns (as contain text)
#   - Remove tags as these these were not handled effectively
# - Split Image links manually (as wmparserfromhell has issues)
# - Clean out the columns so that we just get text
# - automated get display text on page -- removal of all other elements
#   (external links, references, html elements etc)

### Define some functions to clean up the data


def getCleaning(wikicode):
    """Collect the wikicode nodes (templates, tags, bad internal links) that
    should be removed before measuring article length."""
    templates = wikicode.filter_templates()
    templates = [template for template in templates if "column" not in template]
    tags = wikicode.filter_tags()  # remove <ref></ref>
    int_links = wikicode.filter_wikilinks()
    # Remove everything thats not an image
    int_links_bad = [link for link in int_links if ':' in link and 'Image' not in link]
    int_links_bad += [link for link in int_links if 'List' in link]  # remove links to lists
    to_clean = templates + int_links_bad + tags
    return to_clean


def tidyPage(clean_wikicode):
    """Get the text from columns and images then strip everything else,
    returning the plain display text of the page."""
    new_int_links = clean_wikicode.filter_wikilinks()
    # clean the links
    new_int_links = set([str(link) for link in new_int_links])
    for link in new_int_links:
        if "Image" in link:
            # get the display text out of the image wrapper
            splitimage = link.split("|")
            imagetext = splitimage[len(splitimage) - 1]
            imagetext = re.sub("]]$", "", imagetext).strip()
            try:
                clean_wikicode.replace(link, str(imagetext))
            except:
                pass
                print("Error with image: {}".format(imagetext))  # Catches images with no text
    new_templates = clean_wikicode.filter_templates()
    for column in new_templates:
        # get the text out of the columns in a table wrapper
        col = re.sub("\n", "", str(column))
        splitcols = col.split("|col")
        splitcols = splitcols[1:]
        splitcols = [col.split("=", 1)[1] for col in splitcols]
        colphrase = ' '.join(splitcols)
        try:
            clean_wikicode.replace(str(column), splitcols)
        except:
            pass
            print("Error in columns")
    output_code = clean_wikicode.strip_code()
    return output_code


def cleanPage(page):
    """Run all of this to clear out the gubbins: parse, remove the nodes
    flagged by getCleaning, then tidy with tidyPage."""
    wikipage = mwp.parse(page, skip_style_tags=True)
    obj_to_remove = getCleaning(wikipage)
    for item in obj_to_remove:
        try:
            wikipage.remove(item)
        except:
            pass  # when item has already been removed
    clean_wikicode = tidyPage(wikipage)
    return clean_wikicode


# Apply cleaning
mergedFrame["clean_text2019"] = mergedFrame["text2019"].map(lambda x: cleanPage(x))
mergedFrame["clean_text2009"] = mergedFrame["text2009"].map(lambda x: cleanPage(x))

## remove numbers with decimal place in between (they break sentence counting)
for year in bothyears:
    mergedFrame['clean_text{}'.format(year)] = mergedFrame['clean_text{}'.format(year)].map(
        lambda x: re.sub(r"[0-9]\.[0-9]", ",", x))

## get number of sentances
# BUG FIX: the original read 'clean_text2{}'.format(year), which yields the
# nonexistent columns "clean_text22009"/"clean_text22019" (KeyError); the
# intended column is 'clean_text{}'.
for year in bothyears:
    mergedFrame['sent_length{}'.format(year)] = mergedFrame['clean_text{}'.format(year)].map(
        lambda x: len(re.compile(r"[A-Z][^\.!?]*[\.!?]").findall(x)))

## Test sentence regex on random articles
import random
rand_articles = [random.randint(1, 196) for x in range(10)]
for c, art in enumerate(rand_articles):
    if c % 2 == 0:
        year = "2009"
    else:
        year = "2019"
    display(mergedFrame["clean_text{}".format(year)][art])

### Part 2.3 - prepping for analysis
# Exclude short articles and US
mergedFrame["exclude"] = ((mergedFrame["links_us2009"] == 0) &
                          (mergedFrame["sent_length2009"] < 10)) | (mergedFrame["country"] == "United States")
mergedFrame.to_csv('mergedFrame.csv')

#### Part 3 - analysis
# new libraries
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from scipy import stats

# Open CSV
merged_df = pd.read_csv('mergedFrame.csv', index_col=0)
# display(mergedFrame.head())
valid_merged = merged_df[merged_df["exclude"] == False]
# print(len(valid_merged))

### Part 3.1 - link analysis
# link descriptives
link_desc_df = valid_merged[[name for name in valid_merged.columns if "links_us" in name]].describe()
display(link_desc_df)
link_desc_df.to_csv("link_descriptives.csv")

# link t-test (paired: same countries in both years)
link_ttest = stats.ttest_rel(valid_merged["links_us2019"], valid_merged["links_us2009"])
link_ttest_outputs = [{"type": "link", "test": link_ttest[0], "pvalue": link_ttest[1]}]
link_ttest_df = pd.DataFrame(link_ttest_outputs)
link_ttest_df.to_csv("link_ttest.csv")
display(link_ttest_df)

# link plot
link_plot = sns.distplot(valid_merged["links_us2009"], color="red", label="2009")
# plt.show()
sns.distplot(valid_merged["links_us2019"], color="blue", label="2019")
# plt.show()
link_plot.set_title("Number of links to US top-level domains")
link_plot.set(xlabel="Links to US sources", ylabel="Density")
link_plot.legend()
# plt.xlim(right= 0.035)
fig1 = link_plot.get_figure()
fig1.savefig(fname="link_plot.png", dpi=500)
plt.show()

### Part 3.2 - sentence analysis
# article descriptives
art_desc_df = valid_merged[[name for name in valid_merged.columns if "sent_length" in name]].describe()
display(art_desc_df)
art_desc_df.to_csv("art_descriptives.csv")

# article t-test
art_ttest = stats.ttest_rel(valid_merged["sent_length2019"], valid_merged["sent_length2009"])
art_ttest_outputs = [{"type": "sent length", "test": art_ttest[0], "pvalue": art_ttest[1]}]
art_ttest_df = pd.DataFrame(art_ttest_outputs)
art_ttest_df.to_csv("art_ttest_outputs.csv")
display(art_ttest_df)

# plot for article
art_plot = sns.distplot(valid_merged["sent_length2009"], color="red", label="2009")  # ,kde=False
sns.distplot(valid_merged["sent_length2019"], color="blue", label="2019")
art_plot.set_title("Number of sentences per article")
art_plot.set(xlabel="Number of sentences", ylabel="Density")
art_plot.legend()
fig1 = art_plot.get_figure()
fig1.savefig(fname="art_plot.png", dpi=500)
plt.show()

# article correlation
bothyears = ["2009", "2019"]
# bothlinks = ["links_us"] #"links_own",
correl_list = []
for year in bothyears:
    tempdic = {}
    pearson = stats.pearsonr(x=valid_merged["links_us{}".format(year)],
                             y=valid_merged["sent_length{}".format(year)])
    tempdic["pearson_test"] = pearson[0]
    tempdic["pearson_pvalue"] = pearson[1]
    temp_df = pd.DataFrame([tempdic.values()], index=[year], columns=tempdic.keys())
    correl_list.append(temp_df)
    sns.jointplot(x=valid_merged["links_us{}".format(year)],
                  y=valid_merged["sent_length{}".format(year)])
    plt.show()
correl_link_df = pd.concat(correl_list, axis=0)
correl_link_df.to_csv("art_correlation.csv")
display(correl_link_df)

### Part 3.3 - link per sentence analysis
# links per sentence columns
bothyears = ["2009", "2019"]
for year in bothyears:
    valid_merged["lps_us{}".format(year)] = valid_merged.apply(
        lambda x: x.loc["links_us{}".format(year)] / x.loc["sent_length{}".format(year)], axis=1)

# descriptives for lps
lps_desc_df = valid_merged[[name for name in valid_merged.columns if "lps_us" in name]].describe()
display(lps_desc_df)
lps_desc_df.to_csv("lps_descriptives.csv")

# t-test for lps
lps_ttest = stats.ttest_rel(valid_merged["lps_us2019"], valid_merged["lps_us2009"])
lps_ttest_outputs = [{"type": "lps", "test": lps_ttest[0], "pvalue": lps_ttest[1]}]
lps_ttest_df = pd.DataFrame(lps_ttest_outputs)
lps_ttest_df.to_csv("lps_ttest_outputs.csv")
display(lps_ttest_df)

# plot for lps
lps_plot = sns.distplot(valid_merged["lps_us2009"], color="red", label="2009")  # ,kde=False
sns.distplot(valid_merged["lps_us2019"], color="blue", label="2019")
lps_plot.set_title("Links to US top-level domains per sentence in article")
lps_plot.set(xlabel="Links to US sources per sentence in article", ylabel="Density")
lps_plot.legend()
plt.ylim(top=37)
fig1 = lps_plot.get_figure()
fig1.savefig(fname="lps_plot.png", dpi=500)
plt.show()
clean_wrangling_data_summative.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy
import scipy
import scipy.sparse
import sklearn
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cluster import KMeans
import sklearn.metrics.pairwise
import string
import collections

# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from nltk import word_tokenize, regexp_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
# -


def process_text(text, stem=True):
    """ Tokenize text in sets of 2 words in addition to by word.

    Stopwords are removed first; the remaining words are returned both as
    single tokens and as overlapping two-word bigrams (extracted with a
    lookahead regex so the bigrams overlap).
    """
    # BUG FIX: str.translate expects a translation table, not a string --
    # passing string.punctuation directly did not strip punctuation (and could
    # corrupt control characters).  Build a proper deletion table instead.
    text = text.translate(str.maketrans('', '', string.punctuation))
    words = word_tokenize(text)
    clean_words = []
    stopwds = set(stopwords.words('english'))
    for w in words:
        if w not in stopwds:
            clean_words.append(w)
    wordstr = " ".join(clean_words)
    setsOf2 = regexp_tokenize(wordstr, pattern=r"(?=\b([a-zA-Z]{1,40} [a-zA-Z]{1,40}))")
    tokens = setsOf2 + clean_words
    return tokens


# +
# Read the image descriptions, lowercasing and stripping special characters
descriptions = []
with open('coco_val.txt', encoding="utf8") as f:
    for line in f:
        text = line.lower()  ## Lowercase all characters
        text = text.replace("[comma]", " ")  ## Replace [commas] with empty space
        for ch in text:
            # The cleaning operation happens here, remove all special characters
            if ch < "0" or (ch < "a" and ch > "9") or ch > "z":
                text = text.replace(ch, " ")
        text = ' '.join(text.split())  ## Remove double spacing from sentences
        descriptions.append(text)

dataSet = numpy.array(descriptions)
# -

# +
vectorizer = TfidfVectorizer(tokenizer=process_text, stop_words='english')
TfIdf_dataSet = vectorizer.fit_transform(dataSet)
# print("What our Tf-Idf looks like: ","\n")
# print(TfIdf_dataSet[0:1])
# print(vectorizer.get_feature_names(),"\n")
# NOTE(review): _validate_vocabulary() is a private sklearn API -- presumably
# called only for its side effect of building the vocabulary; verify this is
# still needed with the installed sklearn version.
vectorVocab = vectorizer._validate_vocabulary()
# -

# Pairwise cosine similarity between every pair of descriptions
cosineSimilarity = sklearn.metrics.pairwise.cosine_similarity(TfIdf_dataSet)
print(cosineSimilarity)

# Setting the diagonal above 1 forces each row's own index to sort first,
# so column 0 of the sorted result identifies the row itself.
numpy.fill_diagonal(cosineSimilarity, 1.1)
# Sort each row's indices by descending similarity (the duplicated identical
# argsort call in the original was removed).
cosineSimilaritySorted = numpy.argsort(-1 * cosineSimilarity, axis=1)
# print(cosineSimilaritySorted)

top5similar = cosineSimilaritySorted[:, 0:5]
print()
print(top5similar)
numpy.savetxt("results.csv", top5similar.astype(int), fmt='%i', delimiter=",")
Old/.ipynb_checkpoints/Group assignment 2 Word Tokenizing-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Animating the triangle vertices
#
# This example uses the same animation approach as the notebook
# ([20_Animando_as_cores_dos_triangulos](20_Animando_as_cores_dos_triangulos.ipynb)).
# The difference is in the data being animated: here the position of one vertex
# of each triangle is animated.  The variable 'interpolation_factor' is used to
# compute the new vertex positions ('triangle01_new_vertex_v3' and
# 'triangle02_new_vertex_v1'), one per triangle.  The position arrays of each
# triangle (vertexPosition01 and vertexPosition02) are updated with the new
# positions, and those arrays are then used to refresh the data on the GPU via
# the 'updateVertexPositions()' method.

# +
import time
import math
import numpy as np
import OpenGL.GL as gl
from PyQt5 import QtOpenGL
from PyQt5.QtWidgets import QApplication

from cg.shader_programs.SimpleShaderProgram_v1 import SimpleShaderProgram
from cg.renderers.ModelRenderer_v1 import ModelRenderer


class MyWidget(QtOpenGL.QGLWidget):
    """OpenGL widget that draws two triangles and animates one vertex of each."""

    def initializeGL(self):
        # position of each vertex of the first triangle
        self.vertexPosition01 = np.array([
            -0.90, -0.90, 0.0, 1.0,  # Triangle 1
             0.85, -0.90, 0.0, 1.0,
            -0.90,  0.85, 0.0, 1.0], dtype=np.float32)

        # position of each vertex of the second triangle
        self.vertexPosition02 = np.array([
             0.90, -0.85, 0.0, 1.0,  # Triangle 2
             0.90,  0.90, 0.0, 1.0,
            -0.85,  0.90, 0.0, 1.0], dtype=np.float32)

        # create, for each triangle, an object responsible for uploading the
        # data to the GPU and rendering it
        self.triangleRenderer01 = ModelRenderer(self.vertexPosition01)
        self.triangleRenderer02 = ModelRenderer(self.vertexPosition02)

        # create a simple shader program
        self.shaderProgram = SimpleShaderProgram()

        # bind the shader program so a single uniform color can be configured
        # for all vertices
        self.shaderProgram.bind()
        self.shaderProgram.useUniformColor(True)
        self.shaderProgram.setUniformColor(np.array([0.0, 0.0, 0.5, 1.0], dtype=np.float32))
        self.shaderProgram.release()

        # fetch the location of the shader program's input variable
        position_loc = self.shaderProgram.getVertexPositionLoc()

        # wire the model data up as the shader program's input
        self.triangleRenderer01.setVertexPositionLoc(position_loc)
        self.triangleRenderer02.setVertexPositionLoc(position_loc)

        # record the moment the program started (drives the animation clock)
        self.startTime = time.time()

    def paintGL(self):
        # configure the background color
        gl.glClearColor(0, 0, 0, 1)
        # clear the background with the specified color
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)

        # compute how long the program has been running
        self.currentTime = time.time()
        time_difference = self.currentTime - self.startTime

        # interpolation factor oscillates smoothly in [0, 1]
        interpolation_factor = (math.sin(time_difference) + 1) / 2

        triangle01_v1 = np.array([-0.90, -0.90, 0.0, 1.0], dtype=np.float32)
        triangle01_v3 = np.array([-0.90, 0.85, 0.0, 1.0], dtype=np.float32)
        triangle02_v1 = np.array([0.90, -0.85, 0.0, 1.0], dtype=np.float32)
        triangle02_v2 = np.array([0.90, 0.90, 0.0, 1.0], dtype=np.float32)

        # compute the new animated vertex for each triangle (linear interpolation)
        triangle01_new_vertex_v3 = interpolation_factor * triangle01_v3 + (1 - interpolation_factor) * triangle01_v1
        triangle02_new_vertex_v1 = interpolation_factor * triangle02_v1 + (1 - interpolation_factor) * triangle02_v2

        # update the position arrays in place
        self.vertexPosition01[8:] = triangle01_new_vertex_v3
        self.vertexPosition02[0:4] = triangle02_new_vertex_v1

        # push the updated triangle data to the GPU
        self.triangleRenderer01.updateVertexPositions(self.vertexPosition01)
        self.triangleRenderer02.updateVertexPositions(self.vertexPosition02)

        # bind the shader program that the GPU will execute
        self.shaderProgram.bind()

        # render the first triangle
        self.triangleRenderer01.render()
        # render the second triangle
        self.triangleRenderer02.render()

        # release the shader program
        self.shaderProgram.release()

        # request that paintGL be called again (continuous animation)
        self.update()

    def resizeGL(self, width, height):
        # keep the render area covering the whole window
        gl.glViewport(0, 0, width, height)


def main():
    import sys

    # Create a Qt application
    app = QApplication(sys.argv)

    # Specify the OpenGL context
    glformat = QtOpenGL.QGLFormat()
    glformat.setVersion(3, 3)
    glformat.setDoubleBuffer(True)
    glformat.setProfile(QtOpenGL.QGLFormat.CoreProfile)

    # Create the render window
    w = MyWidget(glformat)
    w.resize(640, 480)
    w.setWindowTitle('OpenGL example')
    w.show()

    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
# -

# ! jupyter nbconvert --to python 21_Animando_os_vertice_do_triangulo.ipynb

# %run -i 21_Animando_os_vertice_do_triangulo.py
1S2020/21_Animando_os_vertice_do_triangulo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Benchmarking sorting algorithms # The final project assignment for Computational Thinking with Algorithms module, GMIT 2020. # # Lecturer: dr <NAME> # # >Author: **<NAME>** # >Github: [andkoc001](https://github.com/andkoc001/) # >Email: <EMAIL> # # <figure> # <img src="https://cdn1.byjus.com/wp-content/uploads/2019/06/Ascending-Order.png" alt="Sorting" style="width:400px"> # <figcaption>Image source: Byjus.com.</figcaption> # </figure> # ___ # ### Content # # 1. Introduction # 2. Sorting algorithms # 1. Bubble Sort # 2. Quicksort # 3. Bucket Sort # 4. Merge Sort # 5. Timsort # 3. Benchmarking # 1. About the benchmark # 2. Benchmarking procedure # 4. Findings / Discussion # # ___ # ## 1. Introduction # ### What is sorting? # # There is a question put on Quora.com on what is sorting. One of the answers is as follows: “It’s the process of arranging all objects within a collection by comparing one or more of it’s attributes (...). The objects must be of the same type since they must have all the attributes in common“ ([Quora - What is sorting?](https://www.quora.com/What-is-sorting-2)). # # According to Comen, “sorting can be defined as a process of arranging every element within the collection (e.g. array) in a certain order, e.g. ascending. Cormen et al. defines sorting as follows: for a sequence of n numbers $(a1, a2, …, an)$ sorting is such a permutation (reordering) of the input sequence that $a1 ≤ a2 ≤ … ≤ an$“ ([<NAME> et. al, 2009](https://web.ist.utl.pt/~fabio.ferreira/material/asa/clrs.pdf)). # # Sorting is one of the most fundamental problems of computing. 
“In the early days of computing, the common wisdom was that up to thirty percent of all computing cycles were spent in sorting” ([<NAME> & <NAME>, 2011](https://www.iasj.net/iasj?func=fulltext&aId=36934)). Although many efficient algorithms have been developed over the time, this research topic remains valid. # # ### Algorithm complexity # # Very important in computer science is the question of performance of algorithms, and more precisely - the rate of growth of the algorithm complexity. As each operation performed by a computer takes up resources, like time or memory, and various algorithms - even leading to the same result - require an unequal amount of the resources. In practical terms, the time needed for an algorithm to complete its task depends on various factors, among others, such as: # the input size, # # * the initial state of the input, like elements order, # * number of operations needed, # * processor performance, # * memory allocation (auxiliary variables are held in the memory), etc. # # # Algorithms analysis is not concerned with external factors such as processing power. In order to eliminate the effect of external determinants, algorithms analysis abstracts them and purely on the algorithm itself. "When trying to characterize an algorithm’s efficiency in terms of execution time, independent of any particular program or computer, it is important to quantify the number of operations or steps that the algorithm will require" ([<NAME> & <NAME>, 2005](https://runestone.academy/runestone/books/published/pythonds/index.html)). # # Consequently, it is relevant to consider algorithms’ efficiency, often described with reference to complexity in terms of time and complexity in terms of space (memory) - how fast would the function grow with linearly increasing input size. # # The problem of sorting is of a complex nature. In fact, there are several closely connected concerns relating to it. The concept of algorithms is one of them. 
Computers operate on sequential tasks, performed one by one. An algorithm is any well-defined computational procedure that takes some value, or set of values, as input and produces some value, or set of values, as output. "An algorithm is thus a sequence of computational steps that transform the input into the output" ([<NAME> et. al, 2009](https://web.ist.utl.pt/~fabio.ferreira/material/asa/clrs.pdf)). For a successful algorithm, the instructions must be clear (unambiguous), definite and leading to a finite result. # # ### Big O notation # # Algorithms efficiency is usually described using asymptotic analysis, that is approximation of a function that is asymptotically equivalent ([Wikipedia - Asymptotic analysis](https://www.wikipedia.com/en/Asymptotic_analysis)). In algorithms analysis, this property is traditionally denoted with so-called big O notation, meaning the complexity of the algorithm is at most of such a degree ([Wikipedia - Big O notation](https://en.wikipedia.org/wiki/Big_O_notation)). In other words, the function's growth rate is limited from above by such a function, but it may actually grow slower. # # If a running time is $O(f(n))$, the running time is at most $k∙f(n)$. (...) We use big-O notation for asymptotic upper bounds, since it bounds the growth of the running time from above for large enough input sizes. ([Khan Academy - asymptotic notation](https://www.khanacademy.org/computing/computer-science/algorithms#asymptotic-notation)) # # <figure> # <img src="https://cdn.kastatic.org/ka-perseus-images/501211c02f4c6765f60f23842450e1151cfd9c89.png" alt="Big O notation"> # <figcaption>Image source: Khan Academy.</figcaption> # </figure> # # It is worth noting that there are also other notations used in code analysis, such as Omega, Thata and others ([<NAME>, 2017](http://www2.unb.ca/~owen/courses/2383-2018/using-big-theta.pdf)). # # It is often convenient to visualise the relationship between input size and time (or space) on a graph. 
The curve showing the relationship is then approximated by a mathematical function, and simplified with the most significant part of the function. For example, when the curve is an approximation of the square function, it is denoted as O(n2) and means that for n elements to be sorted, time needed for completion of the algorithm is n2. Below figure and table shows the time complexity for various numbers of elements in the array (n) ([<NAME>. al, 2009](https://web.ist.utl.pt/~fabio.ferreira/material/asa/clrs.pdf), [<NAME>, 1998](https://en.wikipedia.org/wiki/The_Art_of_Computer_Programming), [<NAME> & <NAME>, 2011](https://algs4.cs.princeton.edu/home/)) # # <figure> # <img src="https://upload.wikimedia.org/wikipedia/commons/7/7e/Comparison_computational_complexity.svg" alt="Big O notation" style="width:400px"> # <figcaption>Image source: Wikipedia.</figcaption> # </figure> # For various input data sizes n (e.g the number of elements in the array) there will be different resource consumption N (e.g. time, memory). # # Time required for completion of a task, such as sorting an array depends greatly on the algorithm complexity. 
# The below table shows the relationship between the number of elements n and
# the time it would take for the selected efficiencies
# (reproduced from: https://youtu.be/LKiaoV86iJo?t=2494)
#
# | Array size $n$ | 10 | 20 | 50 | 100 | 200 | 1000 |
# | --- | --- | --- | --- | --- | --- | --- |
# | $\log n$ | 3.32 ns | 4.23 ns | 5.64 ns | 6.64 ns | 7.64 ns | 9.97 ns |
# | $n$ | 10 ns | 20 ns | 50 ns | 100 ns | 200 ns | 1 $\mu$s |
# | $n \log n$ | 33.21 ns | 86.44 ns | 282.2 ns | 664.4 ns | 1.54 $\mu$s | 9.97 $\mu$s |
# | $n^2$ | 100 ns | 400 ns | 2.5 $\mu$s | 10 $\mu$s | 40 $\mu$s | 1 ms |
# | $2^n$ | 1 $\mu$s | 1.05 ms | 13 days | $4x10^{13}$ years | $5.1x10^{43}$ years | $3.4x10^{284}$ years |
# | $n!$ | $3.6$ ms | 77 years | $9.6x10^{44}$ years | $3x10^{141}$ years | $2.5x10^{358}$ years | $1.27x10^{2551}$ years |
#
# It is a valid question to realise that the data to be sorted may be in
# various states of initial order. It is a less consuming process to sort an
# array that is almost sorted already. In practice, three scenarios are
# considered: average, worst, and best case. For this reason, while
# benchmarking the sorting algorithms, it is vital to use the same arrays.
#
# <figure>
# <img src="https://miro.medium.com/max/1192/1*ipkeWQ_Lb0lbkhB8rigxTA.png" alt="Big O notation" style="width:596px">
# <figcaption>Image source: Bigocheatsheet.com, Medium.com</figcaption>
# </figure>

# Sorting algorithms are aggregated into two classes: comparison-based sorting
# and partition-based sorting. In the former the arrangement of the elements is
# performed by comparing the elements' values. In the latter, the arrangement
# is achieved by division. For instance, in the bucket sorting algorithm, all
# the values are divided into groups - buckets, and the elements are
# distributed between the groups.
"Surprisingly, there are # potentially faster ways to sort elements if you know something about those elements # in advance" ([<NAME> et al, 2016](https://www.amazon.com/Algorithms-Nutshell-Practical-George-Heineman/dp/1491948922)). # # There are several further qualities of sorting algorithms. One of the most important is in-place sorting. It occurs if it requires only a fixed amount of additional, other than the given array, storage space during the sorting process, i.e. O(1). Opposite to this one is out-of-place sorting, which requires extra storage space on top of the given array, and creates another auxiliary array of data while the sorting process continues. # # Stable sorting algorithms maintain the order in which elements of the same value (key). This means that if prior to sorting one element of a particular value occurred before the other element of the same value, then after stable sorting this order will be always kept. # Application of data sorting algorithms can be found in numerous instances of computer programs. Very often the overall efficiency of a program depends on the sorting algorithm it uses. It is, therefore, important to understand the concept of algorithms complexity and know that there are different sorting algorithms. 
This would allow to select the most suitable algorithm for the purpose and to optimise efficiency of the sorting process # ___ # # ## 2.1 Bubble Sort # ### Algorithm description # # Key properties (Wikipedia, FreeCodeCamp): # * Time complexity: $O(n^2)$ # * Space complexity: $O(1)$ # * Stable: Yes # * In-place: Yes # * Method: Exchanging # # <figure> # <img src="https://upload.wikimedia.org/wikipedia/commons/5/54/Sorting_bubblesort_anim.gif" alt="Bubble Sort" style="width:300px; height:200px"> # <figcaption>Image source: Wikipedia.</figcaption> # </figure> # # Bubble sort is considered as one of the conceptually simplest sorting algorithms, but by the expense of time and space complexity, which are respectively O(n2) and O(1) ([Wikipedia - Bubble sort](https://en.wikipedia.org/wiki/Bubble_sort)). # # "The idea is to make repeated passes up the array. On each pass the next lightest value will appear in the proper place. Assuming the array is indexed [0...n-1], we require (n − 1) passes to guarantee that the array is sorted. The bubbling process compares adjacent values and insures that the larger of the two is on top" ([<NAME> & <NAME>, 2011](https://www.iasj.net/iasj?func=fulltext&aId=36934)). # # The below figure shows the process of Bubble sort. The steps represent iteration over the array’s elements. In step 1, the first two elements of the array are compared to each other. If the first one is greater than the other, a swap takes place. In other words, the greater element is moved forward. Next, element no.2 and element no.3 are compared, and again the greater is left in the front. Systematically, the greater element of each comparison is pushed forward until it encounters a greater one (and then they swap) or reaches the end of the array. By the time step 1 is over, all elements are compared, the largest element is identified and pushed at its final (sorted) position. In the next step this element is excluded from sorting. 
# In step 2, the same procedure is repeated from the beginning. The array is
# being sorted iteratively in each step.

# <figure>
# <img src="https://he-s3.s3.amazonaws.com/media/uploads/2682167.png" alt="Bubble Sort" style="width:740px">
# <figcaption>Image source: Hackerearth.com</figcaption>
# </figure>
#
# ### Algorithm implementation

# +
######################################
# Bubble Sort
######################################

##### Credits #####
# Source: https://stackabuse.com/sorting-algorithms-in-python/
# Adapted and commented by the author of this Notebook

##### Function definition #####

# Function performing the bubble sort; it takes an array to be sorted as an
# argument.  Sorts in place (returns None) and is deliberately left in its
# naive O(n^2) form, since this notebook benchmarks the textbook algorithm.
def bubble_sort(array):
    # Variable swapped is initially set to True, so that the loop runs at least once
    swapped = True
    while swapped:
        swapped = False
        # loop through each element of the array
        for i in range(len(array) - 1):
            # compare current (i-th) element with the next one
            if array[i] > array[i + 1]:
                # when the above condition is satisfied, swap the elements
                array[i], array[i + 1] = array[i + 1], array[i]
                # Set the swapped to True so it will loop again
                swapped = True
# -

# +
# Verify the algorithm works
random_list_of_nums = [2, 7, 1, 8, 4, 1]
print("Before:",random_list_of_nums)
bubble_sort(random_list_of_nums)
print("After:",random_list_of_nums)
# -

# ## 2.2 Quicksort

# ### Algorithm description
#
# Key properties (Wikipedia):
# * Time complexity: $O(n^2)$
# * Space complexity: $O(logn)$
# * Stable: No
# * In-place: Yes
# * Method: Partitioning
#
# <figure>
# <img src="https://upload.wikimedia.org/wikipedia/commons/6/6a/Sorting_quicksort_anim.gif" alt="Quicksort" style="width:300px; height:200px">
# <figcaption>Image source: Wikipedia.</figcaption>
# </figure>
#
# Quicksort is a highly efficient sorting algorithm. It is an example of a
# divide and conquer method.
"Quicksort sorts a list effectively by dividing the list into smaller and smaller lists, and sorting the smaller lists in turn" ([<NAME>, 2004](http://www.cs.carleton.edu/faculty/adalal/teaching/f04/117/notes/searchSort.pdf)). The procedure is repeated recursively ([<NAME> & <NAME>, 2011](https://www.iasj.net/iasj?func=fulltext&aId=36934)). This is an in-place algorithm and is not stable. # # The basic idea behind quicksort is this: Specify one element in the list as a “pivot” point. Then, go through all of the elements in the list, swapping items that are on the “wrong” side of the pivot. In other words, swap items that are smaller than the pivot but on the right side of the pivot with items that are larger than the pivot but on the left side of the pivot. Once you’ve done all possible swaps, move the pivot to wherever it belongs in the list. Now we can ignore the pivot, since it’s in position, and repeat the process for the two halves of the list (on each side of the pivot). We repeat this until all of the items in the list have been sorted ([<NAME>, 2004](http://www.cs.carleton.edu/faculty/adalal/teaching/f04/117/notes/searchSort.pdf)). # <figure> # <img src="https://algs4.cs.princeton.edu/23quicksort/images/quicksort-overview.png" alt="Quicksort"> # <figcaption>Image source: https://algs4.cs.princeton.edu/23quicksort/</figcaption> # </figure> # # # Quicksort is a divide-and-conquer method for sorting. It works by partitioning an array into two parts, then sorting the parts independently. "The crux of the method is the partitioning process, which rearranges the array to make the following three conditions hold: # * The entry a[j] is in its final place in the array, for some j. # * No entry in a[lo] through a[j-1] is greater than a[j]. # * No entry in a[j+1] through a[hi] is less than a[j]. # # We achieve a complete sort by partitioning, then recursively applying the method to the subarrays" ([<NAME>, <NAME>, 2011](https://algs4.cs.princeton.edu/home/)). 
# <figure>
# <img src="https://algs4.cs.princeton.edu/23quicksort/images/partitioning-overview.png" alt="Quicksort">
# <figcaption>Image source: https://algs4.cs.princeton.edu/23quicksort/</figcaption>
# </figure>
#
# The process of the Quicksort is illustrated in the below figure.

# <figure>
# <img src="https://algs4.cs.princeton.edu/23quicksort/images/quicksort.png" alt="Quicksort">
# <figcaption>Image source: https://algs4.cs.princeton.edu/23quicksort/</figcaption>
# </figure>

# ### Algorithm implementation

# +
######################################
# Quicksort
######################################

##### Credits #####
# Source: https://youtu.be/u4tVQszsyEQ
# Adapted and commented by the author of this Notebook

##### Function definition #####

# Function performing the quick sort; it takes an array to be sorted as an argument.
# Returns a NEW sorted list (the input is not modified).  Note this is an
# out-of-place variant: each call builds three sublists around the pivot.
def sortowanie_szybkie(array):
    # creation of empty lists
    mniejsze = []  # less than the pivot
    rowne = []     # equal to the pivot
    wieksze = []   # greater than the pivot

    # base case of the recursion
    # check whether the array is more than one element long
    # (otherwise, a one-element list is considered to be sorted)
    if len(array) <= 1:
        return array
    # recursion case, when the array contains more than one element
    else:
        # set the pivot value at the middle element of the list
        middle = (len(array)) // 2
        pivot = array[middle]
        # let's consider three cases for each element of the list
        for x in array:
            # case #1 - the current element is greater than the pivot
            if x > pivot:
                wieksze.append(x)  # add the current element to the list "wieksze"
            # case #2 - the current element is equal to the pivot
            elif x == pivot:
                rowne.append(x)  # add the current element to the list "rowne"
            # case #3 - the current element is less than the pivot
            else:
                mniejsze.append(x)  # add the current element to the list "mniejsze"
        # as a result of the above loop, the function will return:
        # in the middle: the element(s) equal to the pivot (already in final position)
        # on the left-hand side: elements less than the pivot - still unsorted,
        #   therefore the same function is called recursively on "mniejsze"
        # on the right-hand side: elements greater than the pivot - still unsorted,
        #   therefore the same function is called recursively on "wieksze"
        return sortowanie_szybkie(mniejsze) + rowne + sortowanie_szybkie(wieksze)
# -

# Verify the algorithm works
random_list_of_nums = [2, 7, 1, 8, 4, 1]
print("Before:",random_list_of_nums)
print("After:",sortowanie_szybkie(random_list_of_nums))

# ## 2.3 Bucket Sort

# ### Algorithm description
#
# Key properties (Wikipedia):
# * Time complexity: $O(n^2)$
# * Space complexity: $O(nk)$
# * Stable: Yes
# * In-place: No
# * Method: Partitioning
#
# Bucket Sort is an example of a non-comparison class algorithm. It is
# particularly useful when sorting relatively uniformly distributed elements in
# the array. When the input is drawn from a uniform distribution, bucket sort
# runs in linear time ([<NAME> et. al, 2009](https://web.ist.utl.pt/~fabio.ferreira/material/asa/clrs.pdf))
#
# The idea of the bucket sorting algorithm is as follows. First the given range
# of elements is divided into k sub-intervals (buckets) of the same size and
# desired sequence. Then, the elements are assigned to the appropriate buckets.
# For uniformly distributed elements in the array (which is a presumed
# condition), each bucket would receive a similar - and relatively small -
# number of the elements. After that, elements inside the buckets are sorted,
# typically using Insert Sort. Finally, the content of the buckets are put
# together (append) while maintaining the already arranged sequence.
#
# It is also possible to sort the content of the buckets by recursively calling the Bucket sort algorithm ([Programing-algorithms.net - Bucket sort](http://www.programming-algorithms.net/article/41160/Bucket-sort)).
#
# The below figure shows the idea of the Bucket sort process.
#
# <figure>
# <img src="https://cdn.programiz.com/sites/tutorial2program/files/Bucket_2.png" alt="Bucket Sort" style="width:500px">
# <figcaption>Image source: https://www.programiz.com/dsa/bucket-sort</figcaption>
# </figure>

# ### Algorithm implementation

# +
######################################
# Bucket Sort
######################################

##### Credits #####
# Own implementation, developed based on pseudocode from https://youtu.be/geVyIsFpxUs
# Adapted and commented by the author of this Notebook

# Auxiliary function that sorts the content of each bucket, using insertion sort.
# Adapted from: https://www.geeksforgeeks.org/bucket-sort-2/
# BUG FIX vs. the previous revision: the value being inserted is now saved in
# `key` BEFORE the shifting starts.  The old code kept comparing against
# list[i] while list[i] itself was being overwritten by the shift, which
# duplicated elements (e.g. insertSort([3, 1]) returned [3, 3]).  The bounds
# check `j >= 0` is also evaluated *before* list[j], so the loop can no
# longer read list[-1] (i.e. the last element) once j runs off the front.
# The function sorts the given list in place and returns it.
def insertSort(list):
    # loop through all elements after the first; the prefix list[:i] is sorted
    for i in range(1, len(list)):
        key = list[i]  # the value that has to be inserted into the sorted prefix
        j = i - 1      # j points at the last element of the sorted prefix
        # shift every element greater than `key` one slot to the right
        while j >= 0 and list[j] > key:
            list[j + 1] = list[j]
            j -= 1  # decrementation
        # drop the saved value into the gap that the shifting opened
        list[j + 1] = key
    return list

##### Function definition #####
# import required external libraries
import math  # will be needed for ceil() method

# Bucket sort: distribute the values into buckets, insertion-sort each
# bucket, then concatenate the buckets in order.
# Returns a NEW sorted list; the input list is not rearranged.
# NOTE(review): assumes the values are non-negative integers - a negative
# value would compute a negative bucket index; confirm the caller contract.
def bucket_sort(array):
    # guard: max() below would raise ValueError on an empty input
    if not array:
        return []

    # number of buckets
    n_buckets = 6  # assumed arbitrarily

    # create an empty list for each bucket
    bucket = [[] for _ in range(n_buckets)]

    # divider is the (rounded-up) bucket width: the maximum element of the
    # array plus one, divided by the number of buckets, so that max(array)
    # still lands inside the last bucket
    divider = math.ceil((max(array) + 1) / n_buckets)

    # distribute the array's elements into the buckets (still unsorted)
    for value in array:
        bucket[value // divider].append(value)

    # sort the content of each bucket and concatenate the buckets,
    # preserving the bucket order
    sorted_result = []
    for b in bucket:
        sorted_result += insertSort(b)
    return sorted_result
# -

# Verify the algorithm works
random_list_of_nums = [2, 7, 1, 8, 4, 1]
print("Before:",random_list_of_nums)
print("After:",bucket_sort(random_list_of_nums))

# ## 2.4 Merge Sort

# ### Algorithm description
#
# Key properties (Wikipedia):
# * Time complexity: $O(n logn)$
# * Space complexity: $O(n)$
# * Stable: Yes
# * In-place: No
# * Method: Merging
#
# Merge Sort is another highly efficient algorithm. Like the Quicksort algorithm, it also relies on ‘divide and conquer’ strategy, but applies a different approach. It is considered as "a neat algorithm, because it’s the sort that sorts itself” ([<NAME>, 2004](http://www.cs.carleton.edu/faculty/adalal/teaching/f04/117/notes/searchSort.pdf)). According to Wolfram MathWorld, the algorithm was first proposed by <NAME> in 1945.
#
#
# The main idea behind the algorithm is that two ordered lists are merged together into a single list ([<NAME>, 1998](https://en.wikipedia.org/wiki/The_Art_of_Computer_Programming)). "Merge sort starts by dividing the list to be sorted in half. Then, it divides each of these halves in half.
The algorithm repeats until all of these “sublists” have exactly one element in them. At that point, each sublist is sorted. In the next phase of the algorithm, the sublists are gradually merged back together (hence the name), until we get our original list back - sorted, of course" ([<NAME>, 2004](http://www.cs.carleton.edu/faculty/adalal/teaching/f04/117/notes/searchSort.pdf)). # # "While comparing two sublists for merging, the first element of both lists is taken into consideration. While sorting in ascending order, the element that is of a lesser value becomes a new element of the sorted list. This procedure is repeated until both the smaller sublists are empty and the new combined sublist comprises all the elements of both the sublists" ([Hackerearth.com](https://www.hackerearth.com/practice/algorithms/sorting/)). # # <figure> # <img src="https://www.101computing.net/wp/wp-content/uploads/Merge-Sort-Algorithm.png" alt="Bubble Sort" style="width:640px"> # <figcaption>Image source: https://www.101computing.net/merge-sort-algorithm/</figcaption> # </figure> # # Below is the algorithm process visualisation taken from Princeton University lectures materials. 
#
# <figure>
# <img src="https://algs4.cs.princeton.edu/22mergesort/images/mergesortTD.png" alt="Bubble Sort">
# <figcaption>Image source: https://algs4.cs.princeton.edu/22mergesort/</figcaption>
# </figure>

# ### Algorithm implementation

# +
######################################
# Merge Sort
######################################

##### Credits #####
# Source: https://stackabuse.com/sorting-algorithms-in-python/
# Adapted and commented by the author of this Notebook

# Auxiliary function: merge two already-sorted lists into one new sorted
# list.  The merge is stable - on ties the element from the left list wins.
def merge(left_list, right_list):
    merged = []
    left_index = 0
    right_index = 0
    left_len, right_len = len(left_list), len(right_list)

    # while both lists still have unread elements, always take the smaller
    # of the two head elements (<= keeps the merge stable)
    while left_index < left_len and right_index < right_len:
        if left_list[left_index] <= right_list[right_index]:
            merged.append(left_list[left_index])
            left_index += 1
        else:
            merged.append(right_list[right_index])
            right_index += 1

    # exactly one of the lists is exhausted now - append the leftovers of
    # the other (both extends are safe, the exhausted slice is empty)
    merged.extend(left_list[left_index:])
    merged.extend(right_list[right_index:])
    return merged

##### Function definition #####
# Top-down merge sort; returns a NEW sorted list, the input stays unchanged.
def merge_sort(array):
    # a list of zero or one element is already sorted
    if len(array) <= 1:
        return array

    # floor division to get the midpoint - indices must be integers
    mid = len(array) // 2

    # sort each half recursively, then merge the two sorted halves
    return merge(merge_sort(array[:mid]), merge_sort(array[mid:]))
# -

# Verify the algorithm works
random_list_of_nums = [2, 7, 1, 8, 4, 1]
print("Before:",random_list_of_nums)
print("After:",merge_sort(random_list_of_nums))

# ## 2.5 Timsort

# ### Algorithm description
#
# Key properties (Wikipedia):
# * Time complexity: $O(n logn)$
# * Space complexity: $O(n)$
# * Stable: Yes
# * In-place: No
# * Method: Insertion and merging
#
# Timsort is a relatively recent sorting algorithm, implemented by <NAME> in 2002. It is an example of a hybrid sorting algorithm, combining Merge sort and Insertion sort in order to optimise the overall performance. The algorithm is characterised by its adaptive and stable properties ([bugs.python.org](https://bugs.python.org/file4451/timsort.txt)).
#
# The algorithm has replaced the Quick sort in some major programming languages (e.g. Python, Java), It is particularly suited to almost sorted inputs that exist in most real-world data ([Wikipedia, Timsort](https://en.wikipedia.org/wiki/Timsort)). Its time complexity is described as O(n logn) ([N. Auger et. al, 2018](https://drops.dagstuhl.de/opus/volltexte/2018/9467/pdf/LIPIcs-ESA-2018-4.pdf))
#
# "In a nutshell, the main routine marches over the array once, left to right, alternately identifying the next run, then merging it into the previous runs "intelligently". Everything else is complication for speed, and some hard-won measure of memory efficiency" ([bugs.python.org](https://bugs.python.org/file4451/timsort.txt)).
#
# The algorithm divides the array into segments called runs, typically of size 32 to 64.
# "In a dataset, a natural run is a state of the array wherein at least two consecutive elements are currently (relative to the array’s global and local states) in either ascending or descending order. Should the two elements not be in correct ascending order, they are simply reversed in place. Timsort makes a first-pass across the array in search of such runs, while concurrently seeking to identify a minrun, or the minimum size of an ordered list within the list" ([Medium, The case for Timsort](https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414)) # # <figure> # <img src="https://miro.medium.com/max/1400/0*9J7j17iPNp3E04iR.png" alt="Bubble Sort" style="width:500px"> # <figcaption>Image source:https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414</figcaption> # </figure> # # If a run is smaller than this minimum run size, insertion sort is used to add more elements to the run until the minimum run size is reached ([Wikipedia - Timsort](https://en.wikipedia.org/wiki/Timsort)). # # "If, say, the minrun is equal to or less than 64 elements, Timsort knows to reduce itself to Binary Insertion sort, which guarantees the implementation of merging only in cases where the “weight” of a minrun is too much (e.g., cases where merging cannot be efficiently implemented because the number of minruns is greater than the power of two). In such cases where the array is greater than 64 elements, the first-pass natural-run search determines said natural runs for the sake of merging. It is ideal to delay merging until the optimized time, given the fact that an array may not be fully reduced to its set of runs without the algorithms “knowledge” of exclusive or excluded elements, not to mention the array’s size" ([Medium, The case for Timsort](https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414)). # # These segments are then sorted using insertion sort and merged together, forming finally the sorted array. 
# # <figure> # <img src="https://corte.si/posts/code/timsort/64r-tim.png" alt="Bubble Sort" style="width:700px"> # <figcaption>Image source:https://corte.si/posts/code/timsort/index.html</figcaption> # </figure> # # This algorithm is similar to Merge sort, but was designed to take advantage of runs of consecutive ordered elements that already exist in most real-world data. "The advantage of merging ordered runs instead of merging fixed size sub-lists (as done by traditional mergesort) is that it decreases the total number of comparisons needed to sort the entire list" ([Wikipedia - Timsort](https://en.wikipedia.org/wiki/Timsort)). # # Timsort utilises a concept of ‘galloping’, which enhances the merging process. # "In galloping mode, Timsort takes two runs (called “sub-runs”, say X and Y) and checks, via binary search, if Y[0] could fit into X. If so, the entirety of the run is placed at the found position given the fact that the two runs are accordingly sorted due to the created of runs in Timsort’s previous step(s). Should Y[0] not fit in to Xat Y[0], then the process is reversed and Timsort attempts to insert X[0] into Y" ([Medium. com - The case for Timsort](https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414)). # # # <figure> # <img src="https://miro.medium.com/max/1024/0*tQAmlZPVcmcYLKaX.png" alt="Bubble Sort" style="width:500px"> # <figcaption>Image source:https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414</figcaption> # </figure> # # Timsort is now commonly commented as the most efficient sorting algorithm currently available for real-life data ([Medium.com](https://medium.com/@george.seif94/this-is-the-fastest-sorting-algorithm-ever-b5cee86b559c), [Hackernoon.com](https://hackernoon.com/timsort-the-fastest-sorting-algorithm-youve-never-heard-of-36b28417f399)). 
# ### Algorithm implementation

# +
######################################
# Timsort
######################################

##### Credits #####
# Source: https://quinston.com/code-snippets/
# Adapted and commented by the author of this Notebook

# Auxiliary function that sorts a run in place with insertion sort.
# Each newly considered element is swapped leftwards until it meets a
# neighbour that is not larger; the (mutated) list is returned.
def InsertionSort(array):
    for pos in range(1, len(array)):
        cur = pos
        # walk the element at `cur` to the left while it is smaller than
        # the element directly before it
        while cur > 0 and array[cur] < array[cur - 1]:
            array[cur], array[cur - 1] = array[cur - 1], array[cur]
            cur -= 1
    return array

# Merge step: combine two already-sorted lists into a NEW sorted list.
def Merge(aArr, bArr):
    merged = []
    a = 0  # read position in aArr
    b = 0  # read position in bArr

    # take the smaller head element until one of the lists runs out
    while a < len(aArr) and b < len(bArr):
        if aArr[a] <= bArr[b]:
            merged.append(aArr[a])
            a += 1
        else:
            merged.append(bArr[b])
            b += 1

    # append whatever remains of the non-exhausted list
    merged.extend(aArr[a:])
    merged.extend(bArr[b:])
    return merged

##### Function definition #####
# Simplified Timsort: the list is cut into fixed-size runs which are
# insertion-sorted, then neighbouring runs are merged in passes of
# doubling width.  The list is sorted IN PLACE (via slice assignment)
# and is also returned, so both calling styles used in this notebook work.
def TimSort(array):
    # run length - usually chosen between 32 and 64
    RUN = 32

    # phase 1: insertion-sort every RUN-sized slice of the array;
    # the slice past the end is simply shorter, Python slicing clips it
    for start in range(0, len(array), RUN):
        array[start:start + RUN] = InsertionSort(array[start:start + RUN])

    # phase 2: merge neighbouring runs, doubling the run width each pass
    width = RUN
    while width < len(array):
        for start in range(0, len(array), 2 * width):
            array[start:start + 2 * width] = Merge(
                array[start:start + width],
                array[start + width:start + 2 * width])
        width = width * 2
    return array
# -

# Verify the algorithm works
random_list_of_nums = [2, 7, 1, 8, 4, 1]
print("Before:",random_list_of_nums)
TimSort(random_list_of_nums)
print("After:",random_list_of_nums)

# ____

# ## 3.1 About the benchmark

# ### Algorithms selection
#
# The five sorting algorithms are selected according to the following criteria:
#
# 1. A simple comparison-based sort - I have chosen to analyse the **Bubble Sort**
# 2. An efficient comparison-based sort - **Quicksort**
# 3. A non-comparison sort - **Bucket Sort**
#
# The remaining two algorithms were left to my choice:
#
# 4.
# Another efficient sort - **Merge Sort**
# 5. A hybrid sort - **Timsort**
#
# Additionally, for comparison, Python's built-in sorting algorithm - the **sorted()** method - has also been benchmarked.
#
# Each of the five selected algorithms is briefly described, followed by the algorithm's implementation in Python, which is then benchmarked. The benchmarking consists of running the sorting algorithms on random-number arrays of various input sizes and measuring the time each run takes.
#
# Finally, at the end of the report, the results of the benchmarking process are discussed and the key findings presented.
#
# ### The benchmark conditions
#
# This project is about assessing the time complexity of the selected sorting algorithms, or more accurately - of their actual implementations. This means measuring the time required for execution, and comparing the algorithms to each other as well as to the results found in the literature.
#
# In the benchmark, arrays of randomly generated integers with different input sizes $n$ are used. A variety of input sizes were selected, as per the project brief, i.e. $n$=100, 250, 500, 750, 1000, 1250, 2500, 3750, 5000, 6250, 7500, 8750, 10000, to test the effect of the input size on the running time of each algorithm. The arrays consist of integer numbers randomly generated in a range from 0 to 99.
#
# Even though the selected sorting algorithms are well known, the tests are subject to numerous factors affecting the timing, potentially resulting in deviation from the expected performances presented in the literature. These factors include, among others, the array size, the array's pre-sorting arrangement, the actual algorithm implementation, processor performance, and other processes being performed while executing the sorting algorithm.
#
# In order to make the benchmarking more accurate, each test will be repeated ten times and the average of the measured times will be considered in the benchmark.
# Additionally, for the sake of the test credibility, each run for each algorithm and array size is repeated ten times, and the average result taken into analysis.
#
# Furthermore, as the elements of the arrays are randomised, there is a risk that the arrays would be in various degrees pre-sorted, which may lead to uneven sorting cases. In order to eliminate this, the same arrays' values (for a given array size) are used (by cloning) for each sorting algorithm and each of the ten runs within the test. Exactly the same arrays will be used for timing the sorting algorithms, in order to increase the credibility of the benchmarking.

# ## 3.2 Benchmark procedure
#
# The execution times for each algorithm and size of the array being sorted are recorded and stored in the form of a data table. The dataset is organised into columns corresponding to the sorting algorithms, and rows representing the size of the array. The type of the dataset is Pandas' _DataFrame_, and it is assigned to the variable named `data`.
#
# Below I am creating an empty dataset with the following headings only. Subsequently, the value of time for each algorithm and array size will be added to the dataset.
#
# * `Size` - the size of the array,
# * `Bubble` - Bubble sort algorithm,
# * `Quick` - Quicksort,
# * `Bucket` - Bucket sort,
# * `Merge` - Merge sort,
# * `Tim` - Timsort,
# * `Python` - Python's built in method sorted() - for comparison.
#
# Getting times and collating the results into the dataframe. Each test is run `num_runs` times and the average time is then put into the dataset.
#

# +
# external Python libraries for building and analysing the dataset
import numpy as np   # numerical calculations
import pandas as pd  # data manipulation
import time          # time stamps

# creation of empty data (just headings); assigning the Size column below
# also creates the row index of the (initially empty) frame
data = pd.DataFrame(columns = ["Size", "Bubble", "Quick", "Bucket", "Merge", "Tim", "Python"])

# values of the Size column, assumed arbitrarily, based on the project brief
data["Size"] = (100, 250, 500, 750, 1000, 1250, 2500, 3750, 5000, 6250, 7500, 8750, 10000)

# a smaller set of input sizes for visual inspection and testing purposes
#data["Size"] = (10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
# -

# ### Generating random data arrays
#
# Each array is populated with randomly generated integer numbers in a range from 0 to 99.
#
# One array is generated per benchmark size and stored for the analysis; exactly the same arrays (clones) are reused for timing every sorting algorithm, so all algorithms sort identical data.

# +
# generating arrays of random numbers
# based on the algorithm provided in the project brief

def random_array(size):
    """Return a list of `size` random integers drawn uniformly from 0..99.

    Uses one vectorised numpy draw instead of appending one scalar per
    loop iteration; .tolist() converts the result back to a plain list.
    """
    return np.random.randint(0, 100, size).tolist()

# generation of one array for each tested size
# (the unused loop counter of the previous revision has been removed -
# it was re-initialised on every iteration and never read)
random_number_arrays = [random_array(array_size) for array_size in data["Size"]]
# -

# for testing only, show existing arrays, their sizes and first few elements
'''
for i in range(len(data["Size"])):
    print("\nArray #", i+1, "\tSize: ", len(random_number_arrays[i]))
    for j in range(len(random_number_arrays[i])):
        if j<5: # print out first 5 elements of the array
            print(j, "\t", random_number_arrays[i][j])
'''

# #### Bubble Sort

# +
#######################################
# Bubble Sort Benchmark
#######################################
# loop through each array size defined in the data["Size"]
# loop over the array sizes held in data["Size"] (100, 250, 500, ...)
# NOTE: only the first half of the input sizes is benchmarked for Bubble
# Sort, because of its long running times on the larger arrays
for current_array in range(len(data["Size"]) // 2):

    # how many times the measurement is repeated before averaging
    num_runs = 10

    # collected run times (in seconds) for the current array size
    intermediate_results = []

    for r in range(num_runs):
        # sort a fresh copy so every run starts from the same unsorted data
        ar = random_number_arrays[current_array].copy()

        # time stamps taken immediately around the call under test
        start_time = time.time()
        bubble_sort(ar)  ##### sorting implementation being benchmarked #####
        end_time = time.time()

        intermediate_results.append(end_time - start_time)

    # average of all runs for this size, converted to milliseconds
    average_result = np.mean(intermediate_results) * 1000

    # store the averaged time in the results table
    data.loc[current_array, "Bubble"] = average_result
# -

# #### Quicksort benchmark

# +
#######################################
# Quicksort Benchmark
#######################################

# loop over every array size held in data["Size"] (100, 250, 500, ...)
for current_array in range(len(data["Size"])):

    # how many times the measurement is repeated before averaging
    num_runs = 10

    # collected run times (in seconds) for the current array size
    intermediate_results = []

    for r in range(num_runs):
        # sort a fresh copy so every run starts from the same unsorted data
        ar = random_number_arrays[current_array].copy()

        # time stamps taken immediately around the call under test
        start_time = time.time()
        sortowanie_szybkie(ar)  ##### sorting implementation being benchmarked #####
        end_time = time.time()

        intermediate_results.append(end_time - start_time)

    # average of all runs for this size, converted to milliseconds
    average_result = np.mean(intermediate_results) * 1000

    # store the averaged time in the results table
    data.loc[current_array, "Quick"] = average_result
# -

# #### Bucket Sort
# benchmark

# +
#######################################
# Bucket Sort Benchmark
#######################################

# loop over every array size held in data["Size"] (100, 250, 500, ...)
for current_array in range(len(data["Size"])):

    # how many times the measurement is repeated before averaging
    num_runs = 10

    # collected run times (in seconds) for the current array size
    intermediate_results = []

    for r in range(num_runs):
        # sort a fresh copy so every run starts from the same unsorted data
        ar = random_number_arrays[current_array].copy()

        # time stamps taken immediately around the call under test
        start_time = time.time()
        bucket_sort(ar)  ##### sorting implementation being benchmarked #####
        end_time = time.time()

        intermediate_results.append(end_time - start_time)

    # average of all runs for this size, converted to milliseconds
    average_result = np.mean(intermediate_results) * 1000

    # store the averaged time in the results table
    data.loc[current_array, "Bucket"] = average_result
# -

# #### Merge Sort benchmark

# +
#######################################
# Merge Sort Benchmark
#######################################

# loop over every array size held in data["Size"] (100, 250, 500, ...)
for current_array in range(len(data["Size"])):

    # how many times the measurement is repeated before averaging
    num_runs = 10

    # collected run times (in seconds) for the current array size
    intermediate_results = []

    for r in range(num_runs):
        # sort a fresh copy so every run starts from the same unsorted data
        ar = random_number_arrays[current_array].copy()

        # time stamps taken immediately around the call under test
        start_time = time.time()
        merge_sort(ar)  ##### sorting implementation being benchmarked #####
        end_time = time.time()

        intermediate_results.append(end_time - start_time)

    # average of all runs for this size, converted to milliseconds
    average_result = np.mean(intermediate_results) * 1000

    # store the averaged time in the results table
    data.loc[current_array, "Merge"] = average_result
# -
num_runs, "tests:", average_result) #print() #add the average time to the dataframe data.loc[current_array, "Merge"] = average_result # - # #### TimSort benchmark # + ####################################### # <NAME> ####################################### # loop through each array size defined in the data["Size"] column, that is arrays of quantity of elements 100, 250, 500, etc for current_array in range(len(data["Size"])): # for testing - show which array is being processed and its number of elements #print("Array #\t", current_array+1, "\tSize:", data.loc[(current_array),"Size"]) # a placeholder to store results for each test intermediate_results = [] # perform the same sorting test several times in order to get the avarage time num_runs = 10 # number of the tests # benchmarking algorithm for r in range(num_runs): # make a copy of the array to preserve the original unsorted order for the remaining runs ar = random_number_arrays[current_array].copy() # for testing - array before sorting #t1 = [] #for j in range(10): # show only 10 first elements of the array # t1.append(ar[j]) #print("\tFirst 10 elements before sorting:\t", t1) # log the start time (time stamp) start_time = time.time() ##### call the sorting implementation to be benchmarked ##### TimSort(ar) # for testing - array after sorting #t2 = [] #for j in range(10): # 10 first elements # t2.append(ar[j]) #print("\tFirst 10 elements after sorting:\t", t2) # log the end time (time stamp) end_time = time.time() # calculate the elapsed time time_elapsed = end_time - start_time # for testing - show time of each run #print("Time of run", r+1,":", time_elapsed, "\n") # for each sorting instance, add the time to the below array intermediate_results.append(time_elapsed) # Average result from all runs for the current array size average_result = np.mean(intermediate_results) * 1000 # in milliseconds # for testing - show average time for all analysed array sizes #print("Array size", data.loc[(current_array),"Size"], " 
\tAverage time of", num_runs, "tests:", average_result) #print() #add the average time to the dataframe data.loc[current_array, "Tim"] = average_result # - # #### Python built-in sortied() function benchmark # + ####################################### # Python built-in sorted() Benachmark ####################################### # loop through each array size defined in the data["Size"] column, that is arrays of quantity of elements 100, 250, 500, etc for current_array in range(len(data["Size"])): # for testing - show which array is being processed and its number of elements #print("Array #\t", current_array+1, "\tSize:", data.loc[(current_array),"Size"]) # a placeholder to store results for each test intermediate_results = [] # perform the same sorting test several times in order to get the avarage time num_runs = 10 # number of the tests # benchmarking algorithm for r in range(num_runs): # make a copy of the array to preserve the original unsorted order for the remaining runs ar = random_number_arrays[current_array].copy() # for testing - array before sorting #t1 = [] #for j in range(10): # show only 10 first elements of the array # t1.append(ar[j]) #print("\tFirst 10 elements before sorting:\t", t1) # log the start time (time stamp) start_time = time.time() ##### call the sorting implementation to be benchmarked ##### sorted(ar) # for testing - array after sorting #t2 = [] #for j in range(10): # 10 first elements # t2.append(ar[j]) #print("\tFirst 10 elements after sorting:\t", t2) # log the end time (time stamp) end_time = time.time() # calculate the elapsed time time_elapsed = end_time - start_time # for testing - show time of each run #print("Time of run", r+1,":", time_elapsed, "\n") # for each sorting instance, add the time to the below array intermediate_results.append(time_elapsed) # Average result from all runs for the current array size average_result = np.mean(intermediate_results) * 1000 # in milliseconds # for testing - show average time for all 
analysed array sizes #print("Array size", data.loc[(current_array),"Size"], " \tAverage time of", num_runs, "tests:", average_result) #print() #add the average time to the dataframe data.loc[current_array, "Python"] = average_result # - # ### Display the acquired data # # The data acquired in the benchmarking was stored in the dataframe named `data`. # # The benchmark results are the average time for each sorting algorithm and array size, expressed in **milliseconds**. # # For a better readability, the time values were formated so that three decimal places are shown. # + # Apply formating to three decimal places # Source: https://stackoverflow.com/a/42735712 pd.options.display.float_format = '{:,.3f}'.format # transpose the table, source: https://stackoverflow.com/a/31328974 d = data.transpose().reset_index().rename(columns={'index':'Size'}) new_header = d.iloc[0] #grab the first row for the header d = d[1:] #take the data less the header row d.columns = new_header #set the header row as the df header # show the results in a table format d # - # ### Results visualisation # The results of the benchmarking analysis are plotted below in a chart, where the relationship between the input size and time can be seen. # The x-axis represents the size of the input array, and the y-axis - time it took to sort the array. Each sorting algorithm is shown in a different colour. # + import matplotlib.pyplot as plt #import seaborn as sns #import scipy.stats as stats # below command will allow for the plots being displayed inside the Notebook, rather than in a separate screen. # %matplotlib inline ### Results visualisation import matplotlib.pyplot as plt #import seaborn as sns #import scipy.stats as stats # below command will allow for the plots being displayed inside the Notebook, rather than in a separate screen. 
# %matplotlib inline # Setting up the plotting output plt.plot(data['Size'], data['Bubble'], label='Bubble Sort') plt.plot(data['Size'], data['Quick'], label='Quicksort') plt.plot(data['Size'], data['Bucket'], label='Bucket Sort') plt.plot(data['Size'], data['Merge'], label='Merge Sort') plt.plot(data['Size'], data['Tim'], label='Timsort') plt.plot(data['Size'], data['Python'], label='Python Sort') # Adding title, labels and legend plt.title("Measured Time Complexity") plt.xlabel("Input Size") plt.ylabel("Time, milliseconds") plt.legend() # set axis ranges plt.xlim(0,10050) plt.ylim(0,105) # set size of the plot in inches (default dpi=100) plt.gcf().set_size_inches(12, 6) # plot the graph plt.show() # - # ___ # ## 4. Findings / Discussion # From the above it is clear that the Bubble Sort is by far the least efficient of all tested. Although the Bubble sort algorithm is relatively simple, its application for larger input sizes becomes hardly justified. From the chart, it appears the curve takes the shape of a quadratic function. This corresponds to the expected O(n2) time complexity of the algorithm. # # he next two least efficient algorithms are the Timsort and the Merge sort. The times measured for these two are very similar, although Merge Sort is more efficient. The shape of the curves are difficult to identify, but they appear to follow a “wide” O(n logn) shape, which are the expected results for the algorithms. It is interesting, however, when comparing the above tested Timsort implementation against Python’s built-in sorting algorithm, sorted(), which also uses the Timsort algorithm and is much more efficient. This direct comparison demonstrates the importance of actual implementation of an algorithm. Also, it is worth noting that the sorted() method was originally written in C language ([source code](https://github.com/python/cpython/blob/master/Python/bltinmodule.c#L2222)). # # The Quick sort is significantly better than the other three already discussed. 
The plot of their efficiency is similar. It is interesting to see the curve of the Quicksort in relation to the Merge sort one. They are both classified in the literature (see the algorithms description above) as O(n logn) time complexity. However, the former (Quick) outperforms the latter (Merge).
#
# The Bucket sort is the last of the tested algorithms. It showed the best performance of all the tested algorithms (excluding sorted(), which was added as reference only), although only marginally better than the Quicksort. This algorithm is the only one of those selected for benchmarking that is classified as non-comparison based. Its supposed time complexity is of O(nk) degree, where k is the number of the buckets. The algorithm performance seems to be accurately captured in this benchmarking.
#
# ___
# ## References
# #### Lectures materials
# * https://learnonline.gmit.ie/course/view.php?id=1696
# * https://www.comp.nus.edu.sg/~stevenha/cs1020e/lectures/L10%20-%20Sorting.pdf
# * https://www.cs.princeton.edu/~rs/AlgsDS07/04Sorting.pdf
#
# #### Books
# * <NAME> et. al, Introduction to Algorithms, 2009 (https://en.wikipedia.org/wiki/Introduction_to_Algorithms)
# * <NAME>, The Art of Computer Programming, Vol.
3: Sorting and Searching, 1998, (https://en.wikipedia.org/wiki/The_Art_of_Computer_Programming) # * <NAME>, <NAME>, Algorithms, 2011 (https://algs4.cs.princeton.edu/home/) # * <NAME> et al, Algorithms in a Nutshell, 2016, (https://www.amazon.com/Algorithms-Nutshell-Practical-George-Heineman/dp/1491948922) # * <NAME> and <NAME>, Problem Solving with Algorithms and Data Structures using Python, 2005 (https://runestone.academy/runestone/books/published/pythonds/index.html) # # #### Papers # * <NAME>, Analyzing Code with Thetha, Oh and Omega, 2017, http://www2.unb.ca/~owen/courses/2383-2018/using-big-theta.pdf # * <NAME> & <NAME>, NK-Sorting Algorithm, 2011, https://www.iasj.net/iasj?func=fulltext&aId=36934 # * <NAME>, Searching and Sorting Algorithms, 2004, http://www.cs.carleton.edu/faculty/adalal/teaching/f04/117/notes/searchSort.pdf # * <NAME> et. al, On the Worst-Case Complexity of TimSort, 2018, https://drops.dagstuhl.de/opus/volltexte/2018/9467/pdf/LIPIcs-ESA-2018-4.pdf # # # #### Websites (visited in April 2020) # * https://docs.python.org/3.8/howto/sorting.html # * https://en.wikipedia.org/wiki/Sorting_algorithm # * https://en.wikipedia.org/wiki/Asymptotic_analysis # * https://en.wikipedia.org/wiki/Big_O_notation # * https://www.hackerearth.com/practice/algorithms/sorting/ # * https://www.khanacademy.org/computing/computer-science/algorithms/ # * https://bugs.python.org/file4451/timsort.txt # * https://hackernoon.com/timsort-the-fastest-sorting-algorithm-youve-never-heard-of-36b28417f399 # * https://guide.freecodecamp.org/algorithms/ # * https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414 # * https://hackernoon.com/timsort-the-fastest-sorting-algorithm-youve-never-heard-of-36b28417f399 # * http://www.programming-algorithms.net/article/41160/Bucket-sort # * https://www.quora.com/What-is-sorting-2 # * https://www.bigocheatsheet.com/ # * https://www.khanacademy.org/computing/computer-science/algorithms/asymptotic-notation/a/big-o-notation # * 
http://www.programming-algorithms.net/article/41160/Bucket-sort # * https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/tutorial/ # * https://en.wikipedia.org/wiki/Timsort # * https://bugs.python.org/file4451/timsort.txt # * https://medium.com/@rscheiwe/the-case-for-timsort-349d5ce1e414 # * https://medium.com/@george.seif94/this-is-the-fastest-sorting-algorithm-ever-b5cee86b559c # * https://hackernoon.com/timsort-the-fastest-sorting-algorithm-youve-never-heard-of-36b28417f399 # * https://github.com/python/cpython/blob/master/Python/bltinmodule.c#L2222 # # # # ___ # <NAME>, 2020
Benchmark_sorting_algorithms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Organization: Matrix Structure # # >**Reference**: <NAME>, <NAME>, <NAME>, 2016. [*Temporal regularized matrix factorization for high-dimensional time series prediction*](http://www.cs.utexas.edu/~rofuyu/papers/tr-mf-nips.pdf). 30th Conference on Neural Information Processing Systems (*NIPS 2016*), Barcelona, Spain. # # We consider a dataset of $m$ discrete time series $\boldsymbol{y}_{i}\in\mathbb{R}^{f},i\in\left\{1,2,...,m\right\}$. The time series may have missing elements. We express spatio-temporal dataset as a matrix $Y\in\mathbb{R}^{m\times f}$ with $m$ rows (e.g., locations) and $f$ columns (e.g., discrete time intervals), # # $$Y=\left[ \begin{array}{cccc} y_{11} & y_{12} & \cdots & y_{1f} \\ y_{21} & y_{22} & \cdots & y_{2f} \\ \vdots & \vdots & \ddots & \vdots \\ y_{m1} & y_{m2} & \cdots & y_{mf} \\ \end{array} \right]\in\mathbb{R}^{m\times f}.$$ # # # Temporal Regularized Matrix Factorization(TRMF) # Temporal Regularized Matrix Factorization (TRMF) framework is an approach to incorporate temporal dependencies into matrix factorization models which use well-studied time series models to describe temporal dependencies # among ${\boldsymbol{x}_t}$ explicitly.Such models take the form: # # $$\boldsymbol{x}_{t}\approx\sum_{l\in\mathcal{L}}\boldsymbol{\theta}_{l}\circledast\boldsymbol{x}_{t-l}$$ # # where this autoregressive (AR) is specialized by a lag set $\mathcal{L}=\left\{l_1,l_2,...,l_d\right\}$ (e.g., $\mathcal{L}=\left\{1,2,144\right\}$) and weights $\boldsymbol{\theta}_{l}\in\mathbb{R}^{r},\forall l$, and we further define # # $$\mathcal{R}_{AR}\left(X\mid 
\mathcal{L},\Theta,\eta\right)=\frac{1}{2}\sum_{t=l_d+1}^{f}\left(\boldsymbol{x}_{t}-\sum_{l\in\mathcal{L}}\boldsymbol{\theta}_{l}\circledast\boldsymbol{x}_{t-l}\right)^T\left(\boldsymbol{x}_{t}-\sum_{l\in\mathcal{L}}\boldsymbol{\theta}_{l}\circledast\boldsymbol{x}_{t-l}\right)+\frac{\eta}{2}\sum_{t=1}^{f}\boldsymbol{x}_{t}^T\boldsymbol{x}_{t}.$$ # # Thus, TRMF-AR is given by solving # # $$\min_{W,X,\Theta}\frac{1}{2}\underbrace{\sum_{(i,t)\in\Omega}\left(y_{it}-\boldsymbol{w}_{i}^T\boldsymbol{x}_{t}\right)^2}_{\text{sum of squared residual errors}}+\lambda_{w}\underbrace{\mathcal{R}_{w}\left(W\right)}_{W-\text{regularizer}}+\lambda_{x}\underbrace{\mathcal{R}_{AR}\left(X\mid \mathcal{L},\Theta,\eta\right)}_{\text{AR-regularizer}}+\lambda_{\theta}\underbrace{\mathcal{R}_{\theta}\left(\Theta\right)}_{\Theta-\text{regularizer}}$$ # # where $\mathcal{R}_{w}\left(W\right)=\frac{1}{2}\sum_{i=1}^{m}\boldsymbol{w}_{i}^T\boldsymbol{w}_{i}$ and $\mathcal{R}_{\theta}\left(\Theta\right)=\frac{1}{2}\sum_{l\in\mathcal{L}}\boldsymbol{\theta}_{l}^T\boldsymbol{\theta}_{l}$ are regularization terms. # # Matrix Computation Concepts # # ## Kronecker product # # - **Definition**: # # Given two matrices $A\in\mathbb{R}^{m_1\times n_1}$ and $B\in\mathbb{R}^{m_2\times n_2}$, then, the **Kronecker product** between these two matrices is defined as # # $$A\otimes B=\left[ \begin{array}{cccc} a_{11}B & a_{12}B & \cdots & a_{1m_2}B \\ a_{21}B & a_{22}B & \cdots & a_{2m_2}B \\ \vdots & \vdots & \ddots & \vdots \\ a_{m_11}B & a_{m_12}B & \cdots & a_{m_1m_2}B \\ \end{array} \right]$$ # where the symbol $\otimes$ denotes Kronecker product, and the size of resulted $A\otimes B$ is $(m_1m_2)\times (n_1n_2)$ (i.e., $m_1\times m_2$ columns and $n_1\times n_2$ rows). 
# # - **Example**: # # If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]$ and $B=\left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10 \\ \end{array} \right]$, then, we have # # $$A\otimes B=\left[ \begin{array}{cc} 1\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 2\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ 3\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 4\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ \end{array} \right]$$ # # $$=\left[ \begin{array}{cccccc} 5 & 6 & 7 & 10 & 12 & 14 \\ 8 & 9 & 10 & 16 & 18 & 20 \\ 15 & 18 & 21 & 20 & 24 & 28 \\ 24 & 27 & 30 & 32 & 36 & 40 \\ \end{array} \right]\in\mathbb{R}^{4\times 6}.$$ # # ## Khatri-Rao product (`kr_prod`) # # - **Definition**: # # Given two matrices $A=\left( \boldsymbol{a}_1,\boldsymbol{a}_2,...,\boldsymbol{a}_r \right)\in\mathbb{R}^{m\times r}$ and $B=\left( \boldsymbol{b}_1,\boldsymbol{b}_2,...,\boldsymbol{b}_r \right)\in\mathbb{R}^{n\times r}$ with same number of columns, then, the **Khatri-Rao product** (or **column-wise Kronecker product**) between $A$ and $B$ is given as follows, # # $$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2,...,\boldsymbol{a}_r\otimes \boldsymbol{b}_r \right)\in\mathbb{R}^{(mn)\times r}$$ # where the symbol $\odot$ denotes Khatri-Rao product, and $\otimes$ denotes Kronecker product. 
#
# - **Example**:
#
# If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]=\left( \boldsymbol{a}_1,\boldsymbol{a}_2 \right) $ and $B=\left[ \begin{array}{cc} 5 & 6 \\ 7 & 8 \\ 9 & 10 \\ \end{array} \right]=\left( \boldsymbol{b}_1,\boldsymbol{b}_2 \right) $, then, we have
#
# $$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2 \right) $$
#
# $$=\left[ \begin{array}{cc} \left[ \begin{array}{c} 1 \\ 3 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 5 \\ 7 \\ 9 \\ \end{array} \right] & \left[ \begin{array}{c} 2 \\ 4 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 6 \\ 8 \\ 10 \\ \end{array} \right] \\ \end{array} \right]$$
#
# $$=\left[ \begin{array}{cc} 5 & 12 \\ 7 & 16 \\ 9 & 20 \\ 15 & 24 \\ 21 & 32 \\ 27 & 40 \\ \end{array} \right]\in\mathbb{R}^{6\times 2}.$$

def kr_prod(a, b):
    """Khatri-Rao (column-wise Kronecker) product of two matrices.

    ``a`` has shape (m, r) and ``b`` has shape (n, r) (same number of
    columns); the result has shape (m * n, r), its j-th column being the
    Kronecker product of ``a[:, j]`` and ``b[:, j]``.
    """
    return np.einsum('ir, jr -> ijr', a, b).reshape(a.shape[0] * b.shape[0], -1)


import numpy as np

# Quick sanity check of kr_prod against the worked example above.
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8], [9, 10]])
print(kr_prod(A, B))


def TRMF(dense_mat, sparse_mat, W, X, theta, time_lags, lambda_w, lambda_x, lambda_theta, eta, maxiter):
    """Temporal Regularized Matrix Factorization fit by alternating updates.

    Factorizes ``sparse_mat`` (zeros are treated as missing entries) as
    ``W @ X.T`` while constraining the temporal factors ``X`` with an
    autoregressive model over the lags in ``time_lags`` whose per-lag
    coefficient vectors are the rows of ``theta`` (see the objective in the
    markdown above).

    Parameters
    ----------
    dense_mat : ground-truth matrix; used only to track MAPE/RMSE on entries
        that are observed in ``dense_mat`` but missing in ``sparse_mat``.
    sparse_mat : partially observed input matrix; zero marks a missing entry.
    W, X, theta : initial factor matrices. NOTE: they are updated IN PLACE.
    time_lags : 1-D integer array of AR lags.
    lambda_w, lambda_x, lambda_theta, eta : regularization weights.
    maxiter : number of alternating-update sweeps.

    Returns
    -------
    (W, X, theta) : the fitted factors (the same arrays passed in, mutated).
    """
    dim1 = sparse_mat.shape[0]
    dim2 = sparse_mat.shape[1]
    # binary mask of the observed entries (sparse_mat > 0)
    binary_mat = np.zeros((dim1,dim2))
    position = np.where((sparse_mat > 0))
    binary_mat[position] = 1
    # held-out entries used for error tracking only
    pos = np.where((dense_mat > 0) & (sparse_mat == 0))
    d = len(time_lags)
    r = theta.shape[1]
    mape = np.zeros(maxiter)
    rmse = np.zeros(maxiter)
    # NOTE(review): the loop variable `iter` shadows the Python builtin of the
    # same name; kept unchanged for fidelity.
    for iter in range(maxiter):
        # --- update spatial factors W: ridge-regularized LS, one row at a time ---
        var1 = X.T; var2 = kr_prod(var1,var1)
        var3 = np.matmul(var2,binary_mat.T)
        var4 = np.matmul(var1,sparse_mat.T)
        for i in range(dim1):
            W[i,:] = np.matmul(np.linalg.inv((var3[:,i].reshape([r,r]))+lambda_w * np.eye(r)), var4[:,i])
        # --- update temporal factors X with the AR regularizer ---
        var1 = W.T
        var2 = kr_prod(var1,var1)
        var3 = np.matmul(var2, binary_mat)
        var4 = np.matmul(var1, sparse_mat)
        for t in range(dim2):
            Mt = np.zeros((r,r))
            Nt = np.zeros(r)
            if t < max(time_lags):
                # no complete lag window yet: drop the AR terms for this t
                Pt = np.zeros((r,r))
                Qt = np.zeros(r)
            else:
                Pt = np.eye(r)
                # AR prediction of x_t from its lagged values
                Qt = np.einsum('ij, ij -> j', theta, X[t - time_lags, :])
            if t < dim2 - np.min(time_lags):
                if t >= np.max(time_lags) and t < dim2 - np.max(time_lags):
                    index = list(range(0, d))
                else:
                    # keep only lags whose forward window fits inside the matrix
                    index = list(np.where((t + time_lags >= np.max(time_lags)) & (t + time_lags < dim2)))[0]
                for k in index:
                    # contribution of x_t appearing as a lagged term of x_{t + l_k};
                    # theta0 zeroes out lag k so its term can be isolated
                    theta0 = theta.copy()
                    theta0[k, :] = 0
                    Mt = Mt + np.diag(theta[k, :]**2);
                    Nt = Nt + np.multiply(theta[k,:],(X[t+time_lags[k], :] - np.einsum('ij, ij -> j', theta0, X[t + time_lags[k] - time_lags, :])))
                X[t,:] = np.matmul(np.linalg.inv(var3[:, t].reshape([r,r]) + lambda_x * Pt + lambda_x * Mt + lambda_x * eta * np.eye(r)), (var4[:, t] + lambda_x * Qt + lambda_x * Nt))
            elif t >= dim2 - np.min(time_lags):
                # NOTE(review): this branch adds `Qt` without the `lambda_x`
                # weight, unlike the `lambda_x * Qt` in the branch above —
                # possibly intentional, possibly a bug; verify against the
                # reference TRMF implementation before changing.
                X[t, :] = np.matmul(np.linalg.inv(var3[:, t].reshape([r, r]) + lambda_x * Pt + lambda_x * eta * np.eye(r)), (var4[:, t] + Qt))
        # --- update AR coefficients theta, one lag at a time ---
        for k in range(d):
            var1 = X[np.max(time_lags) - time_lags[k] : dim2 - time_lags[k], :]
            var2 = np.linalg.inv(np.diag(np.einsum('ij, ij -> j', var1, var1)) + (lambda_theta / lambda_x) * np.eye(r))
            var3 = np.zeros(r)
            for t in range(np.max(time_lags) - time_lags[k], dim2 - time_lags[k]):
                var3 = var3 + np.multiply(X[t, :], (X[t + time_lags[k], :] - np.einsum('ij, ij -> j', theta, X[t + time_lags[k] - time_lags, :]) +np.multiply(theta[k, :], X[t,:])))
            theta[k, :] = np.matmul(var2,var3)
        # track held-out error for this sweep (stored but not printed)
        mat_hat = np.matmul(W, X.T)
        mape[iter] = np.sum(np.abs(dense_mat[pos] - mat_hat[pos]) / dense_mat[pos]) / dense_mat[pos].shape[0]
        rmse[iter] = np.sqrt(np.sum((dense_mat[pos] - mat_hat[pos])**2)/dense_mat[pos].shape[0])
    return W, X, theta


def st_prediction(dense_mat, sparse_mat, time_lags, lambda_w, lambda_x, lambda_theta, eta, rank, pred_time_steps, back_steps, maxiter1, maxiter2):
    """Rolling one-step-ahead prediction of the last ``pred_time_steps`` columns.

    Trains TRMF on everything before the prediction window (``maxiter1``
    sweeps), then for each later time step re-fits on a sliding window of the
    most recent ``back_steps`` columns (``maxiter2`` sweeps, warm-started from
    the previous factors) and extrapolates one step with the AR model.
    Prints progress every 40 steps and the final MAPE/RMSE over the window;
    returns the (dim1, pred_time_steps) matrix of predictions.

    NOTE(review): ``theta`` is initialized with the module-level ``d`` (number
    of lags) rather than ``time_lags.shape[0]`` — this only works because the
    notebook defines ``d`` globally before calling this function; verify.
    """
    start_time = dense_mat.shape[1] - pred_time_steps
    # training portion: everything before the prediction window
    dense_mat0 = dense_mat[:, 0 : start_time]
    sparse_mat0 = sparse_mat[:, 0 : start_time]
    dim1 = sparse_mat0.shape[0]
    dim2 = sparse_mat0.shape[1]
    mat_hat = np.zeros((dim1, pred_time_steps))
    # small random initialization of the factors
    W = 0.1 * np.random.randn(dim1, rank)
    X = 0.1 * np.random.randn(dim2, rank)
    theta = 0.1 * np.random.randn(d, rank)
    W, X, theta = TRMF(dense_mat0, sparse_mat0, W, X, theta, time_lags, lambda_w, lambda_x, lambda_theta, eta, maxiter1)
    W_p = W.copy()
    theta_p = theta.copy()
    # append one AR-extrapolated time step to the temporal factors
    X0 = np.zeros((dim2 + 1, rank))
    X0[0 : dim2, :] = X.copy()
    X0[dim2, :] = np.einsum('ij, ij -> j', theta, X0[dim2 - time_lags, :])
    X_p = X0[X0.shape[0] - back_steps : X0.shape[0], :]
    # first prediction
    mat_hat[:, 0] = np.matmul(W, X0[dim2, :])
    for t in range(1, pred_time_steps):
        # sliding window of the most recent back_steps columns
        dense_mat1 = dense_mat[:, start_time - back_steps + t : start_time + t]
        sparse_mat1 = sparse_mat[:, start_time - back_steps + t : start_time + t]
        # warm-started re-fit on the window, then extrapolate one step
        W, X, theta = TRMF(dense_mat1, sparse_mat1, W_p, X_p, theta_p, time_lags, lambda_w, lambda_x, lambda_theta, eta, maxiter2)
        W_p = W.copy()
        theta_p = theta.copy()
        X0 = np.zeros((back_steps + 1, rank))
        X0[0 : back_steps, :] = X.copy()
        X0[back_steps, :] = np.einsum('ij, ij -> j', theta, X0[back_steps - time_lags, :])
        X_p = X0[1: back_steps + 1, :]
        mat_hat[:, t] = np.matmul(W, X0[back_steps, :])
        if (t + 1) % 40 == 0:
            print('Time step: {}'.format(t + 1))
    # evaluate over the whole prediction window (observed entries only)
    small_dense_mat = dense_mat[:, start_time : dense_mat.shape[1]]
    pos = np.where(small_dense_mat > 0)
    final_mape = np.sum(np.abs(small_dense_mat[pos] - mat_hat[pos])/small_dense_mat[pos])/small_dense_mat[pos].shape[0]
    final_rmse = np.sqrt(np.sum((small_dense_mat[pos] - mat_hat[pos]) ** 2)/small_dense_mat[pos].shape[0])
    print('Final MAPE: {:.6}'.format(final_mape))
    print('Final RMSE: {:.6}'.format(final_rmse))
    print()
    return mat_hat


# +
import scipy.io

# Load the Hangzhou metro passenger-flow tensor and the pre-generated
# random masks used to simulate missing data.
tensor = scipy.io.loadmat('Hangzhou-data-set/tensor.mat')
tensor = tensor['tensor']
random_matrix = scipy.io.loadmat('Hangzhou-data-set/random_matrix.mat')
random_matrix = random_matrix['random_matrix']
random_tensor = scipy.io.loadmat('Hangzhou-data-set/random_tensor.mat')
random_tensor = random_tensor['random_tensor']

# unfold the tensor into a (location, time) matrix
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.2

# =============================================================================
### Random missing (RM)
scenario ### Set the RM scenario by: # binary_mat = np.round(random_tensor + 0.5 - missing_rate).reshape([random_tensor.shape[0], # random_tensor.shape[1] # * random_tensor.shape[2]]) # ============================================================================= # ============================================================================= ### Non-random missing (NM) scenario ### Set the NM scenario by: binary_tensor = np.zeros(tensor.shape) for i1 in range(tensor.shape[0]): for i2 in range(tensor.shape[1]): binary_tensor[i1,i2,:] = np.round(random_matrix[i1,i2] + 0.5 - missing_rate) binary_mat = binary_tensor.reshape([binary_tensor.shape[0], binary_tensor.shape[1] * binary_tensor.shape[2]]) # ============================================================================= sparse_mat = np.multiply(dense_mat, binary_mat) # + import time start = time.time() pred_time_steps = 144 * 5 back_steps = 144 * 2 time_lags = np.array([1, 2, 108]) dim1, dim2 = sparse_mat.shape rank = 20 lambda_w = 7 lambda_x = 7 lambda_theta = 7 eta = 0.03 d = time_lags.shape[0] maxiter1 = 1000 maxiter2 = 200 #sparse matrix scenario #mat_hat = st_prediction(dense_mat, sparse_mat, time_lags, lambda_w, lambda_x, lambda_theta, eta, rank, pred_time_steps, back_steps, maxiter1, maxiter2) #dense matrix scenario mat_hat = st_prediction(dense_mat, dense_mat, time_lags, lambda_w, lambda_x, lambda_theta, eta, rank, pred_time_steps, back_steps, maxiter1, maxiter2) end = time.time() print('Running time: %d seconds'%(end - start)) # - import matplotlib.pyplot as plt plt.figure(figsize = (12, 2.5)) road = 3 plt.plot(Xt[road, :], 'r', small_dense_mat[road, :], 'b') plt.show() a = np.array([1,3,2,4,5]) B = np.diag(a) C = np.linalg.inv(B) C # **Experiment results** of spatial-temporal data prediction using online TRMF: # # | scenario |`rank`|`Lambda_w`|`Lambda_x`|`Lambda_theta`|`eta`|`maxiter`|`back step`| mape | rmse | # 
|:----------|-----:|---------:|---------:|-------------:|----:|---------:|----------:|-------------:|------------:| # |**Original data**| 10 | 5 | 5 | 5 | 0.03|(1000,200)| 144 * 2 | **0.235035**| **35.0231**| # |**20%, RM**| 10 | 5 | 5 | 5 | 0.03|(1000,200)| 144 * 2 | **0.255117**| **42.2614**| # |**20%, RM**| 10 | 6 | 6 | 6 | 0.03|(1000,200)| 144 * 2 | **0.248802**| **41.2168**| # |**20%, RM**| 10 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.245255**| **39.8855**| # |**20%, RM**| 10 | 7 | 7 | 7 | 0.04|(1000,200)| 144 * 2 | **0.253674**| **40.8621**| # |**20%, RM**| 15 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.234144**| **35.4999**| # |**20%, RM**| 15 | 7 | 7 | 7 | 0.04|(1000,200)| 144 * 2 | **0.251952**| **40.2934**| # |**20%, RM**| 20 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.236752**| **35.1934**| # |**20%, RM**| 20 | 7 | 7 | 7 | 0.04|(1000,200)| 144 * 2 | **0.259573**| **42.508**| # |**20%, RM**| 25 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.240533**| **35.8572**| # |**20%, RM**| 10 | 10 | 10 | 10 | 0.03|(1000,200)| 144 * 2 | **0.248508**| **40.1612**| # |**20%, RM**| 10 | 900 | 900 | 900 | 0.03|(1000,200)| 144 * 2 | **0.362184**| **63.965**| # |**40%, RM**| 20 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.305683**| **47.9195**| # |**20%, NM**| 20 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.235035**| **35.0231**| # |**40%, NM**| 20 | 7 | 7 | 7 | 0.03|(1000,200)| 144 * 2 | **0.363618**| **60.5493**| # # > The experiment relies on the *Urban traffic speed data set in Hangzhou, China*.
toy-examples/Prediction-ST-TRMF-Hdata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First try at using Selenium # source : https://www.youtube.com/watch?v=NhRx99uFUNk from selenium import webdriver from webdriver_manager.chrome import ChromeDriverManager # + #driver = webdriver.Chrome(ChromeDriverManager().install()) # - options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--incognito')# access the browser in incognito mode #options.add_argument('--headless') # access the browser without having to open it driver = webdriver.Chrome("C:/webdrivers/chromedriver.exe", options=options) driver.get("https://nunzioweb.com/iframes-example.htm") # ** Get the iframes of the website ** iframes = driver.find_elements_by_tag_name('iframe') iframes driver.switch_to.frame(iframes[0]) driver.find_element_by_id('mep_0') driver.find_element_by_id('mep_0').get_attribute('class') driver.switch_to.default_content() # run that before accessing any other element # ** Getting the img element out of the site** driver.switch_to.frame(iframes[1]) print(driver.find_element_by_tag_name('img')) driver.switch_to.default_content() # ** Shit ain't working correctly ... for now ** driver.switch_to.frame(iframes[2]) print(driver.find_element_by_link_text("Slick City").click()) driver.switch_to.default_content()
Selenium/Selenium HelloWorld.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # nba_tune_model
#
# ### Uses grid search to select optimal parameters for random forest model

# +
# Import dependencies
import numpy as np
np.set_printoptions(suppress=True)
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
# -

# +
# Preprocess data for random forest
# ----------------------------
#
# Best hyper-parameters found on each repetition are collected in these
# module-level lists and summarised after the loop below.
max_depth = []
max_features = []
min_samples_leaf = []

def rfPreprocess():
    """Run one grid-search repetition and record the best hyper-parameters.

    Reads the game-results CSV, encodes the binary outcome/location columns
    as 0/1, draws a fresh random 75/25 train/test split, reduces the starter
    and bench metric groups to one principal component each (fit on the
    training rows only), and grid-searches a random forest over the reduced
    training set.  The best ``max_depth``, ``max_features`` and
    ``min_samples_leaf`` found are appended to the module-level lists above.

    Returns None; results are accumulated via the module-level lists.
    """
    # Read data and turn binary columns to 0/1
    results = pd.read_csv('..\\data\\results.csv')
    results['teamRslt'] = [1 if x == 'Win' else 0 for x in results['teamRslt']]  # Win = 1, Loss = 0
    results['teamLoc'] = [1 if x == 'Home' else 0 for x in results['teamLoc']]   # Home = 1, Away = 0

    # X/Y and train/test split.  Only the training pieces are used here
    # (the test pieces were unused in the original code), so they are
    # discarded with `_`.  No random_state: each call draws a new split.
    Y = results['teamRslt']
    X = results.loc[:, ['teamLoc', 'diff_starting_WS', 'diff_starting_BPM', 'diff_starting_MP_per_game',
                        'diff_starting_GSpct', 'diff_bench_WS', 'diff_bench_BPM', 'diff_bench_MP_per_game',
                        'diff_bench_GSpct', 'diff_WinPct2', 'timeSincePrev', 'distSincePrev']]
    X_train, _, Y_train, _ = train_test_split(X, Y, test_size=0.25)

    # Separate PCA cols
    PCA_cols_starting = ['diff_starting_WS', 'diff_starting_MP_per_game', 'diff_starting_GSpct']
    PCA_cols_bench = ['diff_bench_WS', 'diff_bench_MP_per_game', 'diff_bench_GSpct']
    X_train_PCA_cols_starting = X_train.loc[:, PCA_cols_starting]
    X_train_PCA_cols_bench = X_train.loc[:, PCA_cols_bench]

    # PCA on starter metrics (retain 75% explained variance; only the first
    # component is kept as the feature below)
    pca_starters = PCA(n_components=0.75, random_state=1)
    pca_starters.fit(X_train_PCA_cols_starting)
    X_train_PCA_cols_starting_transformed = pd.Series(pca_starters.transform(X_train_PCA_cols_starting)[:, 0])

    # PCA on bench metrics
    pca_bench = PCA(n_components=0.75, random_state=1)
    pca_bench.fit(X_train_PCA_cols_bench)
    X_train_PCA_cols_bench_transformed = pd.Series(pca_bench.transform(X_train_PCA_cols_bench)[:, 0])

    # Combine PCA cols with non-PCA cols (training); reset_index aligns the
    # dataframe rows with the 0-based index of the PCA Series
    X_train_noPCA = X_train.drop(PCA_cols_starting + PCA_cols_bench, axis=1).reset_index()
    X_train_new = pd.concat([X_train_noPCA, X_train_PCA_cols_starting_transformed,
                             X_train_PCA_cols_bench_transformed], axis=1)
    X_train_new = X_train_new.rename(columns={0: 'PC_starters', 1: 'PC_bench'})
    X_train_new = X_train_new.drop('index', axis=1)

    # Fit random forest model on PCA'd data via exhaustive grid search
    params = {
        'max_depth': [3, 4, 5],
        'max_features': [3, 4, 5],
        'min_samples_leaf': [5, 7, 9],
        'n_estimators': [251],
    }
    RF_classifier = RandomForestClassifier()
    grid_search_RF = GridSearchCV(estimator=RF_classifier, param_grid=params,
                                  cv=5, verbose=1, n_jobs=-1)
    grid_search_RF.fit(X_train_new, Y_train)

    # Save best parameters to the module-level lists
    max_depth.append(grid_search_RF.best_params_['max_depth'])
    max_features.append(grid_search_RF.best_params_['max_features'])
    min_samples_leaf.append(grid_search_RF.best_params_['min_samples_leaf'])
# -

# Perform grid search 25 times (each repetition uses a fresh random split)
for i in range(25):
    print(i)
    rfPreprocess()

# Print optimal parameters: frequency of each winning value across repetitions
print('Max Depth')
print(pd.Series(max_depth).value_counts())
print('\n')
print('Max Features')
print(pd.Series(max_features).value_counts())
print('\n')
print('Min Samples Leaf')
print(pd.Series(min_samples_leaf).value_counts())
python/nba_tune_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/carlosdg/TrainStyleTransfer/blob/master/TrainFastStyleTransfer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="jf4cIi4Ei0Un" colab_type="text" # # Training Fast Style Transfer for a ml5js model # # We are just following the guide in the [_training-styletransfer_ repository of ml5](https://github.com/ml5js/training-styletransfer) but in google colab. This was done in January 25, 2019. We say this because time goes on and you may find some differences due to different versions. At this time the repository has 16 commits and we used the model successfully on ml5js version 0.1.1 # # __The first step is very important: we need to set the environment to use the GPU__. Go to the `Runtime` tab (up in the window, below the notebook name) and select `Change runtime type`. # # <p align="center"> # <img # src="https://drive.google.com/uc?export=view&id=1uGKJdbcWIV_ym2zi63Z-TfWLAcQyy4c2" # alt="Screenshot showing where the change runtime type is" # width="360" # /> # </p> # # There select `GPU` and click `Save`. # # <p align="center"> # <img # src="https://drive.google.com/uc?export=view&id=1hucS-ZADr3ucmVt9U8vicNNjxZ9U4d2_" # alt="Screenshot showing the notebook settings with GPU selected as hardware accelerator option" # width="360" # /> # </p> # # # Now that we are ready to use the GPU we clone the repository: # + id="DdwxMbTF36Mr" colab_type="code" outputId="86d3fc6a-ea0a-4b0b-b227-49864678f78d" colab={"base_uri": "https://localhost:8080/", "height": 85} # !git clone https://github.com/ml5js/training_styletransfer.git # + [markdown] id="7jjshJKwjinI" colab_type="text" # ## 2. 
Download & unzip the COCO dataset # # Now we download the dataset of images that will be used in training. Note that this will take some minutes to finish. # # In the guide we are told to execute `bash setup.sh` but the unzip command is verbose and annoying (for us at least), so the following commands are the same ones in `setup.sh` but with a quiet unzip (note the `-qq` option) and without the `mkdir` because the repository already have the folders created # + id="05O8OzM54XtI" colab_type="code" outputId="f7d06bb4-08f3-4c62-c998-6be5b296720c" colab={"base_uri": "https://localhost:8080/", "height": 391} # !cd training_styletransfer/data && \ # wget http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat && \ # wget http://msvocds.blob.core.windows.net/coco2014/train2014.zip && \ # unzip -qq train2014.zip # + [markdown] id="3PCpiZe-p0C8" colab_type="text" # ## 3. Upload the style image # # Go to the left pane in here. If it is not already open you should see something like an arrow pointing towards the notebook, click there to open the pane. # # <p align="center"> # <img # alt="Screenshot showing the arrow to open the pane" # src="https://drive.google.com/uc?export=view&id=10xRTTEhLkwKgACgsQCoypI9FPqbz-Udw" # width="360" # /> # </p> # # Once open you should see a `Files` tab. Then you can see the tree directory of the virtual machine. You can also see an `Upload` option # # <p align="center"> # <img # alt="Screenshot showing the Files tab and Upload button" # src="https://drive.google.com/uc?export=view&id=10WglV3FgxqoAb9JN4DcAX1BxcgU5NZIZ" # width="360" # /> # </p> # # Click there and upload your style image (it should be uploaded in the folder as `training_styletransfer` and `sample_data`). # # <p align="center"> # <img # alt="Screenshot showing where the style image should be" # src="https://drive.google.com/uc?export=view&id=1JpZnWzRV2suwGeo5bg3BGvUUnj1yvqHL" # width="360" # /> # </p> # # Make sure that the style image has a reasonable size. 
We used images with dimensions about 600x300. # + [markdown] id="p-f9mzALpKUv" colab_type="text" # ## 4. Run the training script # # Here you have to __change `style.jpg` with the style image name that you uploaded__ to colab (or have the style name be `style.jpg`). # # Note that this will take a long time, about 6 to 8 hours. Make sure to come back every now and then to check that virtual machine is still running. # # We have successfully trained 4 models and with all the available GPU memory for this process we had no problem 👍. But keep in mind that the virtual machines are recycled after 12 hours. So, in our case, after we trained a style we waited for the next day to train another one. # + id="5ypMMcwQ5URC" colab_type="code" outputId="b1770eca-20fe-4c07-c2f5-dffce8b1f69a" colab={"base_uri": "https://localhost:8080/", "height": 683} # !cd training_styletransfer && \ # python style.py --style ../style.jpg \ # --checkpoint-dir checkpoints/ \ # --model-dir models/ \ # --test images/violetaparra.jpg \ # --test-dir tests/ \ # --content-weight 1.5e1 \ # --checkpoint-iterations 1000 \ # --batch-size 20 # + [markdown] id="EaP1Y0dvpf8K" colab_type="text" # ## 6. Zip & Download the model # # Here we make a zip with the model so we can go to the left pane, and in the `files` tab right click `result.zip` and download it. If you don't see `result.zip` try clicking on the `Update` button next to `Upload` to refresh the `Files` tab # + id="ZbdyBCUmaPH2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 884} outputId="8e96b12f-f33a-461b-adc8-7b6eeaa62fde" # !zip -r result.zip training_styletransfer/models
TrainFastStyleTransfer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Kinetic Ising inference demo: draw a random coupling matrix `w0`, simulate
# a spin time series from it, then infer the couplings back from the data and
# compare them against the ground truth.

# +
import numpy as np
import sys
import matplotlib.pyplot as plt
import simulate    # project-local module: kinetic Ising simulator
import inference   # project-local module: per-variable coupling fit
# %matplotlib inline

np.random.seed(1)  # fixed seed so the demo is reproducible

# +
# parameter setting:
n = 100 # number of variables
g = 2.0 # coupling variability parameter

# True couplings: i.i.d. Gaussian with std g/sqrt(n) — the 1/sqrt(n) scaling
# keeps the net input to each spin O(1) as n grows.
w0 = np.random.normal(0.0,g/np.sqrt(n),size=(n,n))
# -

# The heat map of the actual coupling matrix `w0` is plotted:

plt.figure(figsize=(3,3))
plt.title('actual coupling matrix')
plt.imshow(w0,cmap='rainbow',origin='lower')
plt.xlabel('j')
plt.ylabel('i')
plt.clim(-0.5,0.5)
plt.colorbar(fraction=0.045, pad=0.05,ticks=[-0.5,0,0.5])
plt.show()

# Using the function `simulate.generate_data`, we then generate a time series
# of variable states according to the kinetic Ising model with a data length
# $L = 2000$.

l = 2000
s = simulate.generate_data(w0,l)

# +
# Fit each target variable i0 independently: the states at time t (x) are
# used to predict the state of variable i0 at time t+1 (y).
n = s.shape[1]
h0 = np.zeros(n)
w = np.zeros((n,n))

for i0 in range(n):
    x = s[:-1,:]   # NOTE(review): loop-invariant; could be hoisted out of the loop
    y = s[1:,i0]
    h0[i0],w[i0,:] = inference.fit(x,y)

# +
plt.figure(figsize=(11,3.2))

plt.subplot2grid((1,3),(0,0))
plt.title('actual coupling matrix')
plt.imshow(w0,cmap='rainbow',origin='lower')
plt.xlabel('j')
plt.ylabel('i')
plt.clim(-0.5,0.5)
plt.colorbar(fraction=0.045, pad=0.05,ticks=[-0.5,0,0.5])

plt.subplot2grid((1,3),(0,1))
plt.title('predicted coupling matrix')
plt.imshow(w,cmap='rainbow',origin='lower')
plt.xlabel('j')
plt.ylabel('i')
plt.clim(-0.5,0.5)
plt.colorbar(fraction=0.045, pad=0.05,ticks=[-0.5,0,0.5])

plt.subplot2grid((1,3),(0,2))
plt.plot([-1,1],[-1,1],'r--')
plt.scatter(w0,w)
plt.xlabel('actual couplings')
plt.ylabel('inferred couplings')

plt.tight_layout(h_pad=1, w_pad=1.5)
plt.show()
# -

# The inference accuracy is measured by mean square error between actual
# couplings and inferred couplings:
# MSE = $\frac{1}{N^{2}} \sum_{i,j=1}^N (W_{ij} - W_{ij}^{\text{actual}})^2$

MSE = ((w0-w)**2).mean()
print(MSE)
.ipynb_checkpoints/er_def-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install pyjugex from pyjugex import PyjugexAnalysis import nibabel as nib import requests import pyjugex pmap_service_url='http://pmap-pmap-service.apps-dev.hbp.eu' hoc1_body = { "areas": [ { "name": "Area-hOc1", "hemisphere": "left" } ], "threshold": 0.2 } with pyjugex.util.get_pmap(url=f'{pmap_service_url}/multimerge_v2', json=hoc1_body) as resp, open('./hoc1_l_masked.nii.gz', 'wb') as out_file: out_file.write(resp.content) hoc2_body = { "areas": [ { "name": "Area-hOc2", "hemisphere": "left" } ], "threshold": 0.2 } with pyjugex.util.get_pmap(url=f'{pmap_service_url}/multimerge_v2', json=hoc2_body) as resp, open('./hoc2_l_masked.nii.gz', 'wb') as out_file: out_file.write(resp.content) # + hoc1_nii = nib.load('./hoc1_l_masked.nii.gz') hoc2_nii = nib.load('./hoc2_l_masked.nii.gz') gene_list=['MAOA','TAC1'] n_rep = 1000 # - analysis = PyjugexAnalysis( n_rep=1000, gene_list=gene_list, roi1 = hoc1_nii, roi2 = hoc2_nii ) analysis.differential_analysis() # Go grab a coffee print(analysis.anova.result)
pyjugex-test.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.3
#     language: julia
#     name: julia-1.6
# ---

# # User Item Biases With Regularization
# * Prediction for user $i$ and item $j$ is $\tilde r_{ij} = u_i + a_j$
# * Loss function is $L = \sum_{\Omega}(r_{ij} - u_i - a_j)^2 + \lambda_u \sum_i (u_i - \bar u) ^2 + \lambda_a \sum_j (a_j - \bar a)^2 $
# * $\bar u$ is the mean of $u_i$ and $\bar a$ is the mean of $a_j$
# * $\Omega$ is the set of observed pairs $(i, j)$
# * $r_{ij}$ is the rating for user $i$ and item $j$

source = "UserItemBiases";

# + tags=[]
using NBInclude
@nbinclude("Alpha.ipynb");
# -

# # Alternating Least Squares Algorithm
# * $u_i = \dfrac{\sum_{j \in \Omega_i}(r_{ij} - a_j) + \bar u \lambda_u}{|\Omega_i| + \lambda_u} = \dfrac{\rho_i + \bar u \lambda_u}{|\Omega_i| + \lambda_u}$
# * $\Omega$ is the set of (user, item) pairs that we have ratings for
# * $\Omega_i$ is subset of $\Omega$ for which the user is the $i$-th user

# +
# Accumulate, per user i, the residual sum ρ[i] += Σ (r_ij - a_j) and the
# observation count Ω[i] += |Ω_i| over the given (user, item, rating)
# triples. Mutates ρ and Ω in place and returns them.
function get_residuals!(users, items, ratings, u, a, ρ, Ω)
    for row = 1:length(users)
        i = users[row]
        j = items[row]
        r = ratings[row]
        ρ[i] += r - a[j]
        Ω[i] += 1
    end
    ρ, Ω
end

# todo move to utils
# Contiguous slice of 1:n owned by the calling thread; the remainder r of
# divrem(n, nthreads) is spread one extra element over the first r threads.
function thread_range(n)
    tid = Threads.threadid()
    nt = Threads.nthreads()
    d, r = divrem(n, nt)
    from = (tid - 1) * d + min(r, tid - 1) + 1
    to = from + d - 1 + (tid ≤ r ? 1 : 0)
    from:to
end

# One ALS sweep over the user biases u. Each thread accumulates residuals
# for its slice of the ratings into its own column of ρ/Ω (per-thread
# columns avoid data races), the columns are then reduced across threads,
# and the closed-form regularized update
# u_i = (ρ_i + μ λ_u) / (|Ω_i| + λ_u) is applied, where μ = mean(u).
function update_users!(users, items, ratings, u, a, λ_u, ρ, Ω)
    Threads.@threads for t = 1:Threads.nthreads()
        range = thread_range(length(ratings))
        # zero this thread's scratch column before accumulating
        ρ[:, Threads.threadid()] .= 0
        Ω[:, Threads.threadid()] .= 0
        @views get_residuals!(
            users[range],
            items[range],
            ratings[range],
            u,
            a,
            ρ[:, Threads.threadid()],
            Ω[:, Threads.threadid()],
        )
    end
    # reduce the per-thread columns; note these bind new local arrays and do
    # not overwrite the caller's scratch buffers
    ρ = sum(ρ, dims = 2)
    Ω = sum(Ω, dims = 2)
    μ = mean(u)
    Threads.@threads for i = 1:length(u)
        u[i] = (ρ[i] + μ * λ_u) / (Ω[i] + λ_u)
    end
end;
# -

# Run ALS sweeps on the user biases (item biases `a` are held fixed) until
# the supplied stopping criterion reports convergence; returns the fitted u.
function train_model(training, λ_u, a, stop_criteria)
    @debug "training model with parameters [$λ_u]"
    users, items, ratings = training.user, training.item, training.rating
    u = zeros(eltype(λ_u), maximum(users))
    # per-thread scratch buffers reused across sweeps
    ρ_u = zeros(eltype(u), length(u), Threads.nthreads())
    Ω_u = zeros(eltype(u), length(u), Threads.nthreads())
    while !stop!(stop_criteria, [u])
        update_users!(users, items, ratings, u, a, λ_u, ρ_u, Ω_u)
        @debug u
    end
    u
end;

# Predict r̃ = u_i + a_j for each (user, item) pair; ids beyond the trained
# range fall back to the mean bias.
function make_prediction(users, items, u, a)
    r = zeros(eltype(u), length(users))
    # NOTE(review): u_mean/a_mean are computed but unused — the loop calls
    # mean(u)/mean(a) again on every out-of-range hit.
    u_mean = mean(u)
    a_mean = mean(a)
    for i = 1:length(r)
        if users[i] > length(u)
            r[i] += mean(u)
        else
            r[i] += u[users[i]]
        end
        if items[i] > length(a)
            r[i] += mean(a)
        else
            r[i] += a[items[i]]
        end
    end
    r
end;

# # Compute Alphas

# Fit the recommendee's user bias against the pretrained item biases and
# write out the resulting alpha scores.
training = get_split("recommendee")
params = read_params(source);
training.user .= 1 # relabel ids so that recommendee -> 1
stop_criteria = convergence_stopper(1e-9)
u = train_model(training, params["λ"][1], params["a"], stop_criteria);
model(items) = make_prediction(fill(1, length(items)), items, u, params["a"]);
write_recommendee_alpha(model);
notebooks/ProductionAlphas/UserItemBiases.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # [ MATPLOTLIB Day -3]

# ## Histogram:
# Used for numerical data

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# %matplotlib inline

# ## rwidth:

# +
blood_sugar = [113,85,90,150,149,88,93,115,135,80,77,82,129]

plt.hist(blood_sugar, rwidth = 0.6)
# -

# ## bins

plt.hist(blood_sugar, rwidth = 0.5, bins = 4)

# ## title, xlabel and ylabel:

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar,bins = [80,100,125,150], rwidth = 0.95, color = "red")

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar,bins = 3, rwidth = 0.95, color = "red")

# Raw Data
#
# 80-100 Normal
#
# 100 -123 Pre-Diabetic
#
# 125 -150 Diabetic

# +
plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")

blood_sugar_men = [113,85,90,150,149,88,93,115,135,80,77,82,129]
blood_sugar_women = [67,98,89,120,133,150,84,69,89,79,120,112,100]

plt.hist([blood_sugar_men,blood_sugar_women], bins = [80,100,125,150], rwidth = 0.95, color = ["Blue","Purple"])
# -

# ## Legend:

# +
plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")

blood_sugar_men = [113,85,90,150,149,88,93,115,135,80,77,82,129]
blood_sugar_women = [67,98,89,120,133,150,84,69,89,79,120,112,100]

plt.hist([blood_sugar_men,blood_sugar_women], bins = [80,100,125,150], rwidth = 0.95, color = ["Blue","Purple"], label = ["Men","Women"])
plt.legend()
# -

# ## histtype:

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar, bins = [80,100,125,150], rwidth = 0.95, histtype = "step")

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar, bins = [80,100,125,150], rwidth = 0.95, histtype = "barstacked")

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar, bins = [80,100,125,150], rwidth = 0.95, histtype = "bar")

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar, bins = [80,100,125,150], rwidth = 0.95, histtype = "stepfilled")

# ## orientation:

plt.title("Blood Sugar Chart")
plt.xlabel("Sugar Label")
plt.ylabel("Number of Patients")
plt.hist(blood_sugar, bins = [80,100,125,150], rwidth = 0.95, orientation = "horizontal")

# # [ ...cotd of numpy ]

# ## C-order: row wise
# ## F-order: col wise

a = np.arange(9).reshape(3,3)
a

a.ndim

# ## .ravel()

a.ravel()

a.ravel().ndim

type(a.ravel())

# ## .flatten()

a.flatten()

a.flatten().ndim

# FIX: `type(a.flatten)` inspected the bound method object, not its result;
# calling it shows numpy.ndarray, matching the .ravel() example above.
type(a.flatten())

# ## Difference between .ravel() and .flatten() methods:

x = np.arange(9).reshape(3,3)
ravelled_arr = x.ravel()
ravelled_arr[2] = 100
print(ravelled_arr)
print("\n")
print(x)

x = np.arange(9).reshape(3,3)
flatten_arr = x.flatten()
flatten_arr[2] = 100
print(flatten_arr)
print("\n")
print(x)

# ## The original content changes when .ravel() is used, which is not the case in .flatten() method

# ## .T
y = np.arange(9)
y

y.T

# ## .shape
y.shape = [3,3]
y

y.T

# ## .transpose()
y.transpose()

# FIX: `np.arange()` with no arguments raises TypeError (the `stop` argument
# is required). Rebuild the same 3x3 demo array explicitly so the resize and
# axis-sum examples below actually run.
y = np.arange(9).reshape(3,3)

# ## .resize()
np.resize(y,(6,6))

y.min()

y.max()

y.sum()

# FIX: ndarray has no .count() method (the original `y.count()` raised
# AttributeError); `.size` reports the number of elements instead.
y.size

y.std()

y.mean()

np.sqrt(y)

y

# ## Adding the elements col-wise:
y.sum(axis = 0)

# ## Adding the elements row-wise:
y.sum(axis=1)

# # Axis = 0 for col, Axis = 1 for row

# # Operators on Arrays:

a = np.arange(1,5).reshape(2,2)
b = np.arange(5,9).reshape(2,2)
print(a)
print(b)

a+b

a-b

a*b

b*a

a/b

# ## .dot multiplication
a.dot(b)

b.dot(a)
Data-Science-HYD-2k19/Day-based/Day 29.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Model Understanding # # Simply examining a model's performance metrics is not enough to select a model and promote it for use in a production setting. While developing an ML algorithm, it is important to understand how the model behaves on the data, to examine the key factors influencing its predictions and to consider where it may be deficient. Determination of what "success" may mean for an ML project depends first and foremost on the user's domain expertise. # # EvalML includes a variety of tools for understanding models, from graphing utilities to methods for explaining predictions. # # # ** Graphing methods on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed. # # ** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/#jupyterlab-support-python-35) required. To download this, make sure you have [npm](https://nodejs.org/en/download/) installed. # ## Graphing Utilities # First, let's train a pipeline on some data. 
# + import evalml from evalml.pipelines import BinaryClassificationPipeline X, y = evalml.demos.load_breast_cancer() X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(X, y, problem_type='binary', test_size=0.2, random_seed=0) pipeline_binary = BinaryClassificationPipeline(component_graph = { "Label Encoder": ["Label Encoder", "X", "y"], "Imputer": ["Imputer", "X", "Label Encoder.y"], "Random Forest Classifier": [ "Random Forest Classifier", "Imputer.x", "Label Encoder.y", ], }) pipeline_binary.fit(X_train, y_train) print(pipeline_binary.score(X_holdout, y_holdout, objectives=['log loss binary'])) # - # ### Feature Importance # # We can get the importance associated with each feature of the resulting pipeline pipeline_binary.feature_importance # We can also create a bar plot of the feature importances pipeline_binary.graph_feature_importance() # ### Permutation Importance # # We can also compute and plot [the permutation importance](https://scikit-learn.org/stable/modules/permutation_importance.html) of the pipeline. from evalml.model_understanding import calculate_permutation_importance calculate_permutation_importance(pipeline_binary, X_holdout, y_holdout, 'log loss binary') from evalml.model_understanding import graph_permutation_importance graph_permutation_importance(pipeline_binary, X_holdout, y_holdout, 'log loss binary') # ### Human Readable Importance # # We can generate a more human-comprehensible understanding of either the feature or permutation importance by using `readable_explanation(pipeline)`. This picks out a subset of features that have the highest impact on the output of the model, sorting them into either "heavily" or "somewhat" influential on the model. These features are selected either by feature importance or permutation importance with a given objective. If there are any features that actively decrease the performance of the pipeline, this function highlights those and recommends removal. 
# # Note that permutation importance runs on the original input features, while feature importance runs on the features as they were passed in to the final estimator, having gone through a number of preprocessing steps. The two methods will highlight different features as being important, and feature names may vary as well. from evalml.model_understanding import readable_explanation readable_explanation(pipeline_binary, X_holdout, y_holdout, objective="log loss binary", importance_method="permutation") readable_explanation(pipeline_binary, importance_method="feature") # feature importance doesn't require X and y # We can adjust the number of most important features visible with the `max_features` argument, or modify the minimum threshold for "importance" with `min_importance_threshold`. However, these values will not affect any detrimental features displayed, as this function always displays all of them. # ### Partial Dependence Plots # We can calculate the one-way [partial dependence plots](https://christophm.github.io/interpretable-ml-book/pdp.html) for a feature. from evalml.model_understanding.graphs import partial_dependence partial_dependence(pipeline_binary, X_holdout, features='mean radius', grid_resolution=5) from evalml.model_understanding.graphs import graph_partial_dependence graph_partial_dependence(pipeline_binary, X_holdout, features='mean radius', grid_resolution=5) # You can also compute the partial dependence for a categorical feature. We will demonstrate this on the fraud dataset. 
# + X_fraud, y_fraud = evalml.demos.load_fraud(100, verbose=False) X_fraud.ww.init(logical_types={"provider": "Categorical", 'region': "Categorical", "currency": "Categorical", "expiration_date": "Categorical"}) fraud_pipeline = BinaryClassificationPipeline(["DateTime Featurizer","One Hot Encoder", "Random Forest Classifier"]) fraud_pipeline.fit(X_fraud, y_fraud) graph_partial_dependence(fraud_pipeline, X_fraud, features='provider') # - # Two-way partial dependence plots are also possible and invoke the same API. partial_dependence(pipeline_binary, X_holdout, features=('worst perimeter', 'worst radius'), grid_resolution=5) graph_partial_dependence(pipeline_binary, X_holdout, features=('worst perimeter', 'worst radius'), grid_resolution=5) # ### Confusion Matrix # # For binary or multiclass classification, we can view a [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix) of the classifier's predictions. In the DataFrame output of `confusion_matrix()`, the column header represents the predicted labels while row header represents the actual labels. from evalml.model_understanding.graphs import confusion_matrix y_pred = pipeline_binary.predict(X_holdout) confusion_matrix(y_holdout, y_pred) from evalml.model_understanding.graphs import graph_confusion_matrix y_pred = pipeline_binary.predict(X_holdout) graph_confusion_matrix(y_holdout, y_pred) # ### Precision-Recall Curve # # For binary classification, we can view the precision-recall curve of the pipeline. 
from evalml.model_understanding.graphs import graph_precision_recall_curve # get the predicted probabilities associated with the "true" label import woodwork as ww y_encoded = y_holdout.ww.map({'benign': 0, 'malignant': 1}) y_pred_proba = pipeline_binary.predict_proba(X_holdout)["malignant"] graph_precision_recall_curve(y_encoded, y_pred_proba) # ### ROC Curve # # For binary and multiclass classification, we can view the [Receiver Operating Characteristic (ROC) curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) of the pipeline. from evalml.model_understanding.graphs import graph_roc_curve # get the predicted probabilities associated with the "malignant" label y_pred_proba = pipeline_binary.predict_proba(X_holdout)["malignant"] graph_roc_curve(y_encoded, y_pred_proba) # The ROC curve can also be generated for multiclass classification problems. For multiclass problems, the graph will show a one-vs-many ROC curve for each class. # + from evalml.pipelines import MulticlassClassificationPipeline X_multi, y_multi = evalml.demos.load_wine() pipeline_multi = MulticlassClassificationPipeline(['Simple Imputer', 'Random Forest Classifier']) pipeline_multi.fit(X_multi, y_multi) y_pred_proba = pipeline_multi.predict_proba(X_multi) graph_roc_curve(y_multi, y_pred_proba) # - # ### Binary Objective Score vs. Threshold Graph # # [Some binary classification objectives](./objectives.ipynb) (objectives that have `score_needs_proba` set to False) are sensitive to a decision threshold. For those objectives, we can obtain and graph the scores for thresholds from zero to one, calculated at evenly-spaced intervals determined by `steps`. 
from evalml.model_understanding.graphs import binary_objective_vs_threshold binary_objective_vs_threshold(pipeline_binary, X_holdout, y_holdout, 'f1', steps=10) from evalml.model_understanding.graphs import graph_binary_objective_vs_threshold graph_binary_objective_vs_threshold(pipeline_binary, X_holdout, y_holdout, 'f1', steps=100) # ### Predicted Vs Actual Values Graph for Regression Problems # # We can also create a scatterplot comparing predicted vs actual values for regression problems. We can specify an `outlier_threshold` to color values differently if the absolute difference between the actual and predicted values are outside of a given threshold. # + from evalml.model_understanding.graphs import graph_prediction_vs_actual from evalml.pipelines import RegressionPipeline X_regress, y_regress = evalml.demos.load_diabetes() X_train_reg, X_test_reg, y_train_reg, y_test_reg = evalml.preprocessing.split_data(X_regress, y_regress, problem_type='regression') pipeline_regress = RegressionPipeline(['One Hot Encoder', 'Linear Regressor']) pipeline_regress.fit(X_train_reg, y_train_reg) y_pred = pipeline_regress.predict(X_test_reg) graph_prediction_vs_actual(y_test_reg, y_pred, outlier_threshold=50) # - # Now let's train a decision tree on some data. pipeline_dt = BinaryClassificationPipeline(['Simple Imputer', 'Decision Tree Classifier']) pipeline_dt.fit(X_train, y_train) # ### Tree Visualization # # We can visualize the structure of the Decision Tree that was fit to that data, and save it if necessary. # + from evalml.model_understanding.graphs import visualize_decision_tree visualize_decision_tree(pipeline_dt.estimator, max_depth=2, rotate=False, filled=True, filepath=None) # - # ## Explaining Predictions # We can explain why the model made certain predictions with the [explain_predictions](../autoapi/evalml/model_understanding/prediction_explanations/explainers/index.rst#evalml.model_understanding.prediction_explanations.explainers.explain_predictions) function. 
This can use either the [Shapley Additive Explanations (SHAP)](https://github.com/slundberg/shap) algorithm or the [Local Interpretable Model-agnostic Explanations (LIME)](https://github.com/marcotcr/lime) algorithm to identify the top features that explain the predicted value. # # This function can explain both classification and regression models - all you need to do is provide the pipeline, the input features, and a list of rows corresponding to the indices of the input features you want to explain. The function will return a table that you can print summarizing the top 3 most positive and negative contributing features to the predicted value. # # In the example below, we explain the prediction for the third data point in the data set. We see that the `worst concave points` feature increased the estimated probability that the tumor is malignant by 20% while the `worst radius` feature decreased the probability the tumor is malignant by 5%. # # + from evalml.model_understanding.prediction_explanations import explain_predictions table = explain_predictions(pipeline=pipeline_binary, input_features=X_holdout, y=None, indices_to_explain=[3], top_k_features=6, include_explainer_values=True) print(table) # - # The interpretation of the table is the same for regression problems - but the SHAP value now corresponds to the change in the estimated value of the dependent variable rather than a change in probability. For multiclass classification problems, a table will be output for each possible class. # # Below is an example of how you would explain three predictions with [explain_predictions](../autoapi/evalml/model_understanding/prediction_explanations/explainers/index.rst#evalml.model_understanding.prediction_explanations.explainers.explain_predictions). 
# + from evalml.model_understanding.prediction_explanations import explain_predictions report = explain_predictions(pipeline=pipeline_binary, input_features=X_holdout, y=y_holdout, indices_to_explain=[0, 4, 9], include_explainer_values=True, output_format='text') print(report) # - # The above examples used the SHAP algorithm, since that is what `explain_predictions` uses by default. If you would like to use LIME instead, you can change that with the `algorithm="lime"` argument. # + from evalml.model_understanding.prediction_explanations import explain_predictions table = explain_predictions(pipeline=pipeline_binary, input_features=X_holdout, y=None, indices_to_explain=[3], top_k_features=6, include_explainer_values=True, algorithm="lime") print(table) # + from evalml.model_understanding.prediction_explanations import explain_predictions report = explain_predictions(pipeline=pipeline_binary, input_features=X_holdout, y=None, indices_to_explain=[0, 4, 9], include_explainer_values=True, output_format='text', algorithm="lime") print(report) # - # ### Explaining Best and Worst Predictions # # When debugging machine learning models, it is often useful to analyze the best and worst predictions the model made. The [explain_predictions_best_worst](../autoapi/evalml/model_understanding/prediction_explanations/explainers/index.rst#evalml.model_understanding.prediction_explanations.explainers.explain_predictions_best_worst) function can help us with this. # # This function will display the output of [explain_predictions](../autoapi/evalml/model_understanding/prediction_explanations/explainers/index.rst#evalml.model_understanding.prediction_explanations.explainers.explain_predictions) for the best 2 and worst 2 predictions. By default, the best and worst predictions are determined by the absolute error for regression problems and [cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) for classification problems. 
# # We can specify our own ranking function by passing in a function to the `metric` parameter. This function will be called on `y_true` and `y_pred`. By convention, lower scores are better. # # At the top of each table, we can see the predicted probabilities, target value, error, and row index for that prediction. For a regression problem, we would see the predicted value instead of predicted probabilities. # # + from evalml.model_understanding.prediction_explanations import explain_predictions_best_worst shap_report = explain_predictions_best_worst(pipeline=pipeline_binary, input_features=X_holdout, y_true=y_holdout, include_explainer_values=True, top_k_features=6, num_to_explain=2) print(shap_report) # + lime_report = explain_predictions_best_worst(pipeline=pipeline_binary, input_features=X_holdout, y_true=y_holdout, include_explainer_values=True, top_k_features=6, num_to_explain=2, algorithm="lime") print(lime_report) # - # We use a custom metric ([hinge loss](https://en.wikipedia.org/wiki/Hinge_loss)) for selecting the best and worst predictions. See this example: # + import numpy as np def hinge_loss(y_true, y_pred_proba): probabilities = np.clip(y_pred_proba.iloc[:, 1], 0.001, 0.999) y_true[y_true == 0] = -1 return np.clip(1 - y_true * np.log(probabilities / (1 - probabilities)), a_min=0, a_max=None) report = explain_predictions_best_worst(pipeline=pipeline_binary, input_features=X, y_true=y, include_explainer_values=True, num_to_explain=5, metric=hinge_loss) print(report) # - # ### Changing Output Formats # # Instead of getting the prediction explanations as text, you can get the report as a python dictionary or pandas dataframe. All you have to do is pass `output_format="dict"` or `output_format="dataframe"` to either `explain_prediction`, `explain_predictions`, or `explain_predictions_best_worst`. 
# ### Single prediction as a dictionary import json single_prediction_report = explain_predictions(pipeline=pipeline_binary, input_features=X_holdout, indices_to_explain=[3], y=y_holdout, top_k_features=6, include_explainer_values=True, output_format="dict") print(json.dumps(single_prediction_report, indent=2)) # ### Single prediction as a dataframe single_prediction_report = explain_predictions(pipeline=pipeline_binary, input_features=X_holdout, indices_to_explain=[3], y=y_holdout, top_k_features=6, include_explainer_values=True, output_format="dataframe") single_prediction_report # ### Best and worst predictions as a dictionary report = explain_predictions_best_worst(pipeline=pipeline_binary, input_features=X, y_true=y, num_to_explain=1, top_k_features=6, include_explainer_values=True, output_format="dict") print(json.dumps(report, indent=2)) # ### Best and worst predictions as a dataframe report = explain_predictions_best_worst(pipeline=pipeline_binary, input_features=X_holdout, y_true=y_holdout, num_to_explain=1, top_k_features=6, include_explainer_values=True, output_format="dataframe") report # ### Force Plots # Force plots can be generated to predict single or multiple rows for binary, multiclass and regression problem types. These use the SHAP algorithm. Here's an example of predicting a single row on a binary classification dataset. The force plots show the predictive power of each of the features in making the negative ("Class: 0") prediction and the positive ("Class: 1") prediction. # + import shap from evalml.model_understanding.force_plots import graph_force_plot rows_to_explain = [0] # Should be a list of integer indices of the rows to explain. results = graph_force_plot(pipeline_binary, rows_to_explain=rows_to_explain, training_data=X_holdout, y=y_holdout) for result in results: for cls in result: print("Class:", cls) display(result[cls]["plot"]) # - # Here's an example of a force plot explaining multiple predictions on a multiclass problem. 
These plots show the force plots for each row arranged as consecutive columns that can be ordered by the dropdown above. Clicking the column indicates which row explanation is underneath. # + rows_to_explain = [0,1,2,3,4] # Should be a list of integer indices of the rows to explain. results = graph_force_plot(pipeline_multi, rows_to_explain=rows_to_explain, training_data=X_multi, y=y_multi) for idx, result in enumerate(results): print("Row:", idx) for cls in result: print("Class:", cls) display(result[cls]["plot"]) # - # ## Find Confusion Matrix and thresholds for Binary Classification Pipelines # # For binary classification pipelines, EvalML also provides the ability to compare the actual positive and actual negative histograms, as well as obtaining the confusion matrices and ideal thresholds per objective. # + from evalml.model_understanding import find_confusion_matrix_per_thresholds df, objective_thresholds = find_confusion_matrix_per_thresholds(pipeline_binary, X, y, n_bins=10) df.head(10) # - objective_thresholds # In the above results, the first dataframe contains the histograms for the actual positive and negative classes, indicated by `true_pos_count` and `true_neg_count`. The columns `true_positives`, `true_negatives`, `false_positives`, and `false_negatives` contain the confusion matrix information for the associated threshold, and the `data_in_bins` holds a random subset of row indices (both postive and negative) that belong in each bin. The index of the dataframe represents the associated threshold. For instance, at index `0.1`, there is 1 positive and 309 negative rows that fall between `[0.0, 0.1]`. # # The returned `objective_thresholds` dictionary has the objective measure as the key, and the dictionary value associated contains both the best objective score and the threshold that results in the associated score.
docs/source/user_guide/model_understanding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Jupyter Notebook problems in the Essentials of Paleomagnetism Textbook by <NAME> # ## Problems in Chapter 8 # ## Problem 1 # I want to use the function **ipmag.curie()**. This is in the **ipmag** module which I must first import. Then I need to understand how to call the function, so I use the help function for that. import pmagpy.ipmag as ipmag help(ipmag.curie) # I want to just look at the figures: ipmag.curie(path_to_file='Chapter_8',file_name='curie_example.dat') # The Curie Temperature of this specimen is about 550$^{\circ}$C. # Now for the other file... ipmag.curie(path_to_file='Chapter_8',file_name='curie_example2.dat') # Hmmm. This one IS noisier and so I should try it with a truncated window homing in on what seems to be the true Curie Temperature of around 580$^{\circ}$C. ipmag.curie(path_to_file='Chapter_8',file_name='curie_example2.dat',t_begin=400,t_end=600) # So it appears that the 'true' Tc for this specimen is about 576$^{\circ}$C. # ## Problem 2: # To do this problem, I first have to read in the data file. I looked at it with a text editor first and noticed that there was some sort of description in the first line and column headers in the second (or line 1 counting from zero as python normally does). This is a tab delimited file, so the separation (sep) is '\t'. I read in the data with pandas, print out the top few lines and convert it to a dataframe here: import pandas as pd # import the module # read in the data data=pd.read_csv('Chapter_8/loess_rockmag.dat', sep='\t', header=1) print (data.head()) # print the top few lines data=pd.DataFrame(data) # convert to a Pandas DataFrame # Now I want to make a bunch of plots. One of them is for the ferromagnetic susceptibility. 
I have to subtract 60 nm$^3$kg$^{-1}$ from the total susceptibility (data['chi']). Because the units of $\chi$ are in $\mu$m$^3$kg$^{-1}$, I multiply the 60 by 10$^{-3}$ to get from nano (10$^{-9}$) to micro (10$^{-6}$). data['chi_f']=data['chi']-60e-3 # calculate ferromagnetic susceptibility import matplotlib.pyplot as plt # %matplotlib inline plt.plot(data['chi'],-data['Depth']) plt.ylabel('Depth (m) below S0') plt.xlabel(r'$\chi$ ($\mu$m$^3$kg$^{-1}$)') plt.plot(data['chi_f'],-data['Depth']); # The plot above shows total $\chi$ in blue and $\chi_f$ in green. # plot the sIRM versus depth below reference horizon plt.plot(data['sIRM'],-data['Depth']) plt.ylabel('Depth (m) below S0') plt.xlabel(r'sIRM (mAm$^2$kg$^{-1}$)'); # and now for the chi_f over sIRM: plt.plot(data['chi_f']/data['sIRM'],-data['Depth']) plt.ylabel('Depth (m) below S0') plt.xlabel(r'$\chi_f$/sIRM)'); # Read the Hunt et al. (1995) paper and figure it out! # ## Problem 3: # For this problem, we must read in the data, convert the volume normalized $\chi$ into mass normalized units, and the mass into density. Then we plot the $\chi$ data versus color (by number) and density. Looking at the data in the datafile, it also has some sort of header and columns in the second line (as in Problem 2). data=pd.read_csv('Chapter_8/beach_sand.dat', sep='\t', header=1) print (data.head() )# print the top few lines data=pd.DataFrame(data) # convert to a Pandas DataFrame # First convert mass to kg. Then multiply chi by units of 10^-5, then by the assumed volume (in m$^3$) and then divide by mass (in kg). data['mass_kg']=data['mass']*1e-3 # convert from grams to kilograms. data['chi_norm']=data['chi']*1e-5 # put into SI data['chi_norm']=data['chi_norm']*7e-6 # take out volume in m^3 data['chi_norm']=data['chi_norm']/(data['mass_kg']) # divide by mass in kg print (data.head()) # We need density which should be in kg/m$^3$. 
data['rho']=data['mass_kg']/7e-6 plt.plot(data['Specimen'],data['chi_norm'],'ro') # plot as red dots plt.xlabel('Color') plt.ylabel('Mass normalized susceptibility'); # The darker the specimen (higher Specimen number), the higher the susceptibility. plt.plot(data['rho'],data['chi_norm'],'ro') plt.xlabel('Density') plt.ylabel('Mass normalized susceptibility'); # The denser the specimen, the higher the susceptibility, but the relationship is not linear. # The darker minerals are magnetite, pyroxene, amphibole and biotite. Magnetite is a very dense ($\sim$5000 kg/m$^3$) and very magnetic mineral. The more magnetite in the specimens, the denser and more magnetic (and darker) the specimen becomes. You could test this by extracting the most magnetic minerals out of the samples by suspending the material in water and using a magnet, then repeat the experiment. The remaining material should be less dense, lighter and less magnetic.
data_files/notebooks/Essentials/essentials_ch_8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# # Taking a look at the cMSSM dataset
#
# The constrained Minimal Supersymmetric Standard Model (cMSSM) is the simplest realistic supersymmetric extension of the Standard Model of Particle Physics.
# Supersymmetry is an additional symmetry that introduces new particles, superpartners to the normal particles.
# This symmetry is broken in some unknown way, and this means that unconstrained MSSM has ~100 free parameters.
#
# In cMSSM this is reduced to four (and a half) parameters by making some assumptions at the so-called Grand Unification (GUT) scale (much higher energy than is accessible by terrestrial experiments).
# The assumptions, and free parameters, are:
#
# * the scalar particles all have the mass **M0** at the GUT scale,
# * the partners of the gauge bosons, the gauginos, all have the mass **M12** (shortform for M1 and M2) at the GUT scale,
# * the trilinear couplings of the particles are all **A0**,
# * the ratio of the vacuum expectation values of the two Higgs doublets is **TanBeta**,
# * the 'half' parameter is **SignMu**, the sign of the higgsino mass parameter.
#
# cMSSM is generally considered an unfavoured, if not excluded, theory, but as it served as the first target for the community it still serves as a benchmark for new tools and techniques, partly because all subsequent theories tend toward more free parameters.
# As such a benchmark, the Gambit collaboration performed a [global analysis of the cMSSM](https://arxiv.org/abs/1705.07935) as part of their first wave of papers.
# They also made the data from that scan [publicly available](https://zenodo.org/record/843496) and it is this dataset we explore in the notebook.
#
# The full dataset contains much more information than the subset we explore here.
# We've extracted our model parameters, M0, M12, A0, TanBeta, SignMU, and the quantity we wish to predict: the LHC likelihood, **lnL** is the natural logarithm of the likelihood. # The LHC likelihood is a very complicated beast, reflecting the complexities of the LHC and the plethora of analyses, and we refer to the Gambit paper for the full definition. # + # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.colors import LogNorm import h5py import pandas as pd # Nicer labels def label(k): l = k.split('::')[-1] if l == 'calc_LHC_LogLike': l = 'lnL' return l df = pd.DataFrame() with h5py.File('../CMSSM_subset.hdf5', 'r') as h5: for k in h5.keys(): df[label(k)] = h5[k] df.describe() # - # The dataset include ~7.3 million points. # The model parameter A0 have a large range from -10000 to 10000, while M0 is in ~240 to 10000, M12 is in ~440 to 6200 and TanBeta in ~3.9 to 56. # The prior ranges used in the scan are (from table 1, p. 4): # # | Param | Min | Max | # |:-------:|:------:|:------:| # | A0 | -10000 | 10000 | # | M0 | 50 | 10000 | # | M12 | 50 | 10000 | # | TanBeta | 3 | 70 | # # So we see that the whole ranges are not included, this is because the scanning methods Gambit has used are likelihood-driven. # The missing values are so excluded as to not be even sampled. # This sampling method do have an profound effect on the distribution of our points as we will see next. # # ## 1D histograms # # We construct histograms of the dataset in each of our variables. # Note that we use a logarithmic scale for the y-axis in order to make the structure more apparent. # Typically most points are in a few bins. 
# +
import numpy as np  # was missing: np.linspace is used below but numpy was never imported in this notebook

# Choose a binning per column.  SignMu only takes the values -1 and +1, so it
# gets a small fixed range; every other variable gets 60 equal-width bins over
# its observed range (61 edges -> 60 bins).
#
# NOTE(review): stashing `.bins` as an ad-hoc attribute on a pandas Series is
# not part of the pandas API and is fragile (it may not survive column
# re-access in all pandas versions); it is kept here because the 2D-histogram
# cell below reads `df[x].bins`.
for k in df:
    if k == 'SignMu':
        df[k].bins = np.linspace(-1.2, 1.2, 7)
    else:
        lo, hi = df[k].min(), df[k].max()
        df[k].bins = np.linspace(lo, hi, 60 + 1)

# One histogram per variable; log-scale the counts so the sparse tails stay visible.
fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(12, 8))
for k, ax in zip(df, axes.flatten()):
    ax.hist(df[k], bins=df[k].bins)
    ax.set_xlabel(k)
    ax.set_yscale('log')
# -

# As suspected there is much structure in these histograms; we will discuss the origin of these after plotting the 2D histograms below.
# For now we focus on the lnL histogram.
# The dominating peak is at exactly 0, which is a very specific and therefore unlikely value.
# It arises from a filtering step in the methodology of the Gambit scan. The calculation of lnL is very expensive, and to avoid unnecessary calculations they check if the production cross-section (essentially the normalisation of the signal) is even remotely large enough for the signal from cMSSM to be detectable in the considered analyses.
# If it's not then the point is assigned lnL = 0, the likelihood of the Standard Model (i.e. background-only hypothesis).
#
# This somewhat distorts our view of lnL, which might pose a problem in the machine learning phase.
# A possible treatment is to include our own filter, either the same as Gambit (necessitating cross-section calculations) or a trained classifier.
# Another is to jitter the zeros, adding noise and smoothing out discontinuities.
# However, we should remember to measure before cutting; it might not be a problem at all.
#
# The attentive reader might have noted that the max value of the lnL in the dataset is ~3, seemingly a better fit than the Standard Model!
# This is really due to fluctuations in the data, and in the final analysis the Gambit collaboration gives these points the value zero also.
# There's no need for us to do the same; we would only lose information.
#
# ## 2D Histograms
#
# We will now plot the 2D histograms, or heatmaps as they are also known, of all combinations of the variables.
# The colour denote the number of points in each bin, the color scale are different in each diagram and it's logarithmic. # + nvar = df.shape[1] fig, axes = plt.subplots(ncols=nvar-1, nrows=nvar-1, figsize=(nvar*3, nvar*3)) fig.subplots_adjust(wspace=0.0, hspace=0.0) for i, x in enumerate(df): for j, y in enumerate(df): if i == nvar-1 or j == 0: continue ax = axes[j-1,i] if i >= j: ax.axis('off') continue ax.hist2d(df[x], df[y], bins=(df[x].bins, df[y].bins), cmap='viridis', norm=LogNorm()) bbox_props = dict(fc='white', ec='white', alpha=0.6) ax.text(0.5, 0.95, x, transform=ax.transAxes, va='top', ha='center', bbox=bbox_props) ax.text(0.05, 0.5, y, transform=ax.transAxes, rotation=90, va='center', ha='left', bbox=bbox_props) if j == nvar-1: ax.set_xlabel(x) else: ax.set_xticks([]) if i == 0: ax.set_ylabel(y) else: ax.set_yticks([]) # - # The structures in the sampling becomes very explicit in 2D. # As we stated earlier the structure arises from the likelihood-driven scanning methodology used to sample the parameter space. # As we can see in the bottom row with much of the range feature less with regards to lnL, the likelihood used to the drive the scan is *not* our lnL however. # The scan by Gambit is a *global* analysis which combine data from many different experiments and observations, and it's the combined likelihood from all these that has driven the scan. # # The dominant term in the combined likelihood is the relic density of dark matter. # This term demands that the relic density, the density left in the Universe today, of the lightest cMSSM neutralino must be equal to or smaller than the observed dark matter density from the Planck satellite's measurement of the Cosmic Microwave Background (CMB). # # The relic density is inverse proportional to the neutralino-neutralino annihilation cross-section (i.e. 
how often, when two neutralinos meet, they annihilate into other particles) and for much of the parameter space the cross-section is much too low to produce a small enough relic density.
# Typically the cross-section must be enhanced by some *mechanism* such as a resonance or so-called co-annihilations.
# We will not go into the details here, but the different regions we observe in the sample are from different mechanisms. Figure 2 (p. 14) in the Gambit paper illustrates this.
#
# This obviously limits our understanding of the LHC likelihood: we might be missing interesting features and behaviour in regions disfavoured by other constraints.
# On the other hand, these regions simply aren't interesting from a dark matter perspective, so these potential features aren't actually interesting.
# An interesting complication is of course identifying when we are asking the trained algorithm to extrapolate outside the training data.
# This complication is nicely connected to the business of active learning.
#
# An additional observation is that for much of the parameter space the likelihood is zero. The space can be cut down by only considering A0 < ~0, M0 < ~5000, M12 < ~1500.
cmssm/notebooks/cmssm_data_exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# # Generative Adversarial Networks in Keras

# +
# %matplotlib inline
import importlib
import utils2; importlib.reload(utils2)
from utils2 import *
from tqdm import tqdm
# -

# ## The original GAN!

# See [this paper](https://arxiv.org/abs/1406.2661) for details of the approach we'll try first for our first GAN. We'll see if we can generate hand-drawn numbers based on MNIST, so let's load that dataset first.
#
# We'll be refering to the discriminator as 'D' and the generator as 'G'.

from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train.shape

# Flatten the 28x28 digits into 784-vectors and rescale pixel values to [0, 1].
n = len(X_train)
X_train = X_train.reshape(n, -1).astype(np.float32)
X_test = X_test.reshape(len(X_test), -1).astype(np.float32)
X_train /= 255.
X_test /= 255.

# ## Train

# This is just a helper to plot a bunch of generated images.

def plot_gen(G, n_ex=16):
    """Sample n_ex digits from generator G and display them as a grid."""
    imgs = G.predict(noise(n_ex))
    plot_multi(imgs.reshape(n_ex, 28, 28), cmap='gray')

# Create some random data for the generator.

def noise(bs):
    """Batch of bs latent vectors, uniform on [0, 1)^100."""
    return np.random.rand(bs, 100)

# Create a batch of some real and some generated data, with appropriate labels, for the discriminator.

def data_D(sz, G):
    """Discriminator batch: sz real digits (label 0) followed by sz fakes (label 1)."""
    idx = np.random.randint(0, n, size=sz)
    fake_img = G.predict(noise(sz))
    X = np.concatenate((X_train[idx], fake_img))
    y = [0] * sz + [1] * sz
    return X, y

def make_trainable(net, val):
    """Toggle trainability of net and of every one of its layers."""
    net.trainable = val
    for layer in net.layers:
        layer.trainable = val

# Train a few epochs, and return the losses for D and G. In each epoch we:
#
# 1. Train D on one batch from data_D()
# 2. Train G to create images that the discriminator predicts as real.
def train(D, G, m, nb_epoch=5000, bs=128):
    """Alternate one discriminator step and one generator step per epoch.

    D: the discriminator; G: the generator; m: the stacked model G->D used
    to train G through a frozen D.  Returns the per-step loss lists
    (dl for the discriminator, gl for the generator).
    """
    dl, gl = [], []
    for e in tqdm(range(nb_epoch)):
        # 1. Train D on a half-real / half-generated batch (labels from data_D).
        X, y = data_D(bs//2, G)
        dl.append(D.train_on_batch(X, y))
        # 2. Freeze D and train G (through m) so D labels the fakes as real (0).
        make_trainable(D, False)
        gl.append(m.train_on_batch(noise(bs), np.zeros([bs])))
        make_trainable(D, True)
    return dl, gl

# ## MLP GAN

# We'll keep things simple by making D & G plain ole' MLPs.

MLP_G = Sequential([
    Dense(200, input_shape=(100,), activation='relu'),
    Dense(400, activation='relu'),
    Dense(784, activation='sigmoid'),
])

MLP_D = Sequential([
    Dense(300, input_shape=(784,), activation='relu'),
    Dense(300, activation='relu'),
    Dense(1, activation='sigmoid'),
])
MLP_D.compile(Adam(1e-4), "binary_crossentropy")

MLP_m = Sequential([MLP_G, MLP_D])
MLP_m.compile(Adam(1e-4), "binary_crossentropy")

dl, gl = train(MLP_D, MLP_G, MLP_m, 8000)

# The loss plots for most GANs are nearly impossible to interpret - which is one of the things that make them hard to train.

plt.plot(dl[100:])
plt.plot(gl[100:])

# This is what's known in the literature as "mode collapse".

plot_gen(MLP_G)  # fixed: plot_gen(G, n_ex=16) requires the generator; was called with no argument

# OK, so that didn't work. Can we do better?...

# ## DCGAN

# There's lots of ideas out there to make GANs train better, since they are notoriously painful to get working. The [paper introducing DCGANs](https://arxiv.org/abs/1511.06434) is the main basis for our next section. Also see https://github.com/soumith/ganhacks for many tips!
#
# Because we're using a CNN from now on, we'll reshape our digits into proper images.

X_train = X_train.reshape(n, 28, 28, 1)
X_test = X_test.reshape(len(X_test), 28, 28, 1)

# Our generator uses a number of upsampling steps as suggested in the above papers. We use nearest neighbor upsampling rather than fractionally strided convolutions, as discussed in our style transfer notebook.
CNN_G = Sequential([ Dense(512*7*7, input_dim=100, activation=LeakyReLU()), BatchNormalization(mode=2), Reshape((7, 7, 512)), UpSampling2D(), Convolution2D(64, 3, 3, border_mode='same', activation=LeakyReLU()), BatchNormalization(mode=2), UpSampling2D(), Convolution2D(32, 3, 3, border_mode='same', activation=LeakyReLU()), BatchNormalization(mode=2), Convolution2D(1, 1, 1, border_mode='same', activation='sigmoid') ]) # The discriminator uses a few downsampling steps through strided convolutions. # + CNN_D = Sequential([ Convolution2D(256, 5, 5, subsample=(2,2), border_mode='same', input_shape=(28, 28, 1), activation=LeakyReLU()), Convolution2D(512, 5, 5, subsample=(2,2), border_mode='same', activation=LeakyReLU()), Flatten(), Dense(256, activation=LeakyReLU()), Dense(1, activation = 'sigmoid') ]) CNN_D.compile(Adam(1e-3), "binary_crossentropy") # - # We train D a "little bit" so it can at least tell a real image from random noise. sz = n//200 x1 = np.concatenate([np.random.permutation(X_train)[:sz], CNN_G.predict(noise(sz))]) CNN_D.fit(x1, [0]*sz + [1]*sz, batch_size=128, nb_epoch=1, verbose=2) CNN_m = Sequential([CNN_G, CNN_D]) CNN_m.compile(Adam(1e-4), "binary_crossentropy") K.set_value(CNN_D.optimizer.lr, 1e-3) K.set_value(CNN_m.optimizer.lr, 1e-3) # Now we can train D & G iteratively. dl,gl = train(CNN_D, CNN_G, CNN_m, 2500) plt.plot(dl[10:]) plt.plot(gl[10:]) # Better than our first effort, but still a lot to be desired:... plot_gen(CNN_G) # ## End
deeplearning2/DCGAN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/vishalpolley/Vehicle-Identification-and-Classification-System/blob/master/VICS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="GODjrRPfigMb" colab_type="code" colab={} import os import cv2 from matplotlib import pyplot as plt import numpy as np import math import itertools import random as rd # + [markdown] id="F9Ofeg7DtECS" colab_type="text" # #Data Preparation for CNN # + id="xCtdBNHEtPKN" colab_type="code" outputId="0967e0b0-e729-4539-e4dd-bb04e69af566" colab={"base_uri": "https://localhost:8080/", "height": 54} from google.colab import drive drive.mount('/content/gdrive') # + colab_type="code" id="K46x1BHDuu8X" colab={} global_path = "gdrive/My Drive/VICS/" data_folder = "MIO-TCD-Classification.tar" path_to_test = "test/" path_to_train = "train/" class_ids = [x for x in os.listdir(path_to_train)] # + id="NlDv-UjIuwZx" colab_type="code" colab={} # import tarfile # print(global_path + data_folder) # tar_f = tarfile.open(global_path + data_folder) # tar_f.extractall() # + id="8F9VfigKy0KH" colab_type="code" colab={} def plot_images(list_of_images, max_col = 4): n = len(list_of_images) if n == 1: plt.imshow(list_of_images[0], cmap="gray"); plt.axis('off'); plt.show() else: # get number of columns and rows required r, c = 1, n if n > max_col: c = max_col r = int(math.ceil(n/max_col)) fig = plt.figure(figsize=(20, max_col * r)) for i, (img, name) in enumerate(list_of_images): ax = fig.add_subplot(r, c, (i+1)) ax.set_title(str(class_ids[name])) ax.axis('off') ax.imshow(img, cmap="gray") # + id="KJi7YKqVy7_c" colab_type="code" colab={} IMG_SIZE = 96 def small_pad_image(img, output_shape): 
BLACK = 0 result = [] h, w = img.shape ratio = float(output_shape)/max([h,w]) h_n, w_n = tuple([int(val * ratio) for val in (h,w)]) img_at_size = cv2.resize(img, (h_n, w_n)) delta_w = output_shape - w_n delta_h = output_shape - h_n top = math.ceil(delta_h / 2) bot = delta_h - top left = math.ceil(delta_w / 2) right = delta_w - left return cv2.resize(cv2.copyMakeBorder(img_at_size, left, right, top, bot, cv2.BORDER_CONSTANT, value=0), (output_shape, output_shape)) def unison_shuffled_copies(a, b): assert len(a) == len(b) p = np.random.permutation(len(a)) return a[p], b[p] def get_train_data(path_x, path_y, num_img = -1, fraction = 1): X, y = [], [] for c, id_ in enumerate(class_ids): print("Class ID: %s is changed to ID: %s." % (id_, c)) class_imgs = os.listdir(path_to_train + id_) if fraction < 1: num_img = int(len(class_imgs) * fraction) class_imgs = rd.sample(class_imgs,num_img) if num_img == -1: print(" Found %s images." % len(class_imgs)) else: print(" Found %s images." % num_img) for img in class_imgs: img = path_to_train + id_ + '/' + img y.append(c) X.append(small_pad_image(cv2.imread(img, 0), IMG_SIZE)) return unison_shuffled_copies(np.array(X), np.array(y)) # + id="qbRiba_6zCmq" colab_type="code" outputId="272fb5da-9ff0-4ac5-8426-f248bdf606d9" colab={"base_uri": "https://localhost:8080/", "height": 442} X, y = get_train_data(path_to_train, path_to_train, fraction = 0.25) print("\n\nFinal input shape:", X.shape) # + id="JGYAvBQOzikP" colab_type="code" outputId="cd14769f-374a-4f1e-c2a7-dd7990f99e11" colab={"base_uri": "https://localhost:8080/", "height": 954} plot_images([(a,b) for a, b in zip(X[:16], y[:16])]) # + id="58ThaGSh077F" colab_type="code" outputId="bc131f35-5a8c-44a7-e9ad-18a65f9a02be" colab={"base_uri": "https://localhost:8080/", "height": 34} import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D, BatchNormalization from keras import backend as K from 
keras.callbacks import ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator # + id="a8mPXOhh1Elg" colab_type="code" colab={} batch_size = 256 num_classes = 11 epochs = 40 # test and training dataset sizes train_test_split = 0.9 split_val = int(len(X) * train_test_split) # input image dimensions img_rows, img_cols = X[0].shape # + id="NKMGxgRO1GNO" colab_type="code" outputId="dc9969ed-2019-4ef8-91af-45b797a7c51d" colab={"base_uri": "https://localhost:8080/", "height": 68} # the data, split between train and val sets (x_train, y_train), (x_val, y_val) = (X[:split_val],y[:split_val]), (X[split_val:],y[split_val:]) if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_val = x_val.reshape(x_val.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_val = x_val.reshape(x_val.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_val = x_val.astype('float32') x_train /= 255 x_val /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_val.shape[0], 'val samples') # + id="VCeyB_P61K9z" colab_type="code" colab={} # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_val = keras.utils.to_categorical(y_val, num_classes) datagen = ImageDataGenerator( rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.1, zoom_range=0.1, horizontal_flip=True, fill_mode='nearest') datagen.fit(x_train) train_generator = datagen.flow(x_train, y_train, batch_size=batch_size) # val_datagen = ImageDataGenerator() # val_datagen.fit(x_val) # validation_generator = val_datagen.flow(x_val, y_val, batch_size=batch_size) # + id="kPHkIPBi1UMq" colab_type="code" outputId="d5ea75da-03fd-4e3a-8bd4-a66133540aaa" colab={"base_uri": "https://localhost:8080/", "height": 
1618} model_cnn_2 = Sequential() model_cnn_2.add(Conv2D(32, (3, 3), activation='relu',input_shape=input_shape)) model_cnn_2.add(BatchNormalization()) model_cnn_2.add(Conv2D(32, (3, 3), activation='relu')) model_cnn_2.add(BatchNormalization()) model_cnn_2.add(MaxPooling2D(pool_size=(2, 2))) model_cnn_2.add(Dropout(0.1)) model_cnn_2.add(Conv2D(64, (3, 3), padding='same', activation='relu')) model_cnn_2.add(BatchNormalization()) model_cnn_2.add(Conv2D(64, (3, 3), activation='relu')) model_cnn_2.add(BatchNormalization()) model_cnn_2.add(MaxPooling2D(pool_size=(2, 2))) model_cnn_2.add(Dropout(0.1)) model_cnn_2.add(Conv2D(128, (3, 3), padding='same', activation='relu')) model_cnn_2.add(BatchNormalization()) model_cnn_2.add(Conv2D(128, (3, 3), activation='relu')) model_cnn_2.add(BatchNormalization()) model_cnn_2.add(MaxPooling2D(pool_size=(2, 2))) model_cnn_2.add(Dropout(0.1)) model_cnn_2.add(Flatten()) model_cnn_2.add(Dense(512, activation='relu')) model_cnn_2.add(Dropout(0.3)) model_cnn_2.add(Dense(num_classes, activation='softmax')) optimizer = keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004) # Set our optimizer and loss function (similar settings to our CAE approach) model_cnn_2.compile(loss = keras.losses.categorical_crossentropy, optimizer = optimizer, metrics = ['categorical_accuracy']) callbacks = [ ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.1, patience=5, min_delta=0.0001, mode='auto', cooldown=0, verbose=1, min_lr=0), ] hist = model_cnn_2.fit_generator(train_generator, steps_per_epoch = x_train.shape[0] // batch_size, epochs=epochs, verbose=1, validation_data=(x_val, y_val), callbacks = callbacks, ) score = model_cnn_2.evaluate(x_val, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # + id="bBQ9CE4WkrLa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 884} outputId="6fbefb13-e525-4607-eb8d-e55c250c13f7" model_cnn_2.summary() # + 
# + cell id="MG7RXyc9k2tc" colab_type="code" colab={}
from sklearn import metrics  # was missing: `metrics` is used below but never imported in this notebook

# Predicted class per validation sample, and integer labels recovered from the one-hot targets.
y_pred = model_cnn_2.predict_classes(x_val)
y_val_int = np.argmax(y_val, axis=1)

# Compute confusion matrix
cnf_matrix = metrics.confusion_matrix(y_val_int, y_pred)
np.set_printoptions(precision=2)

# + cell id="sY1rdNwPlZ6F" colab_type="code" colab={}
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    cm        : 2-D array of counts from sklearn.metrics.confusion_matrix.
    classes   : sequence of class names, in label order.
    normalize : if True, convert each row to fractions of its true-class total.
    title/cmap: passed through to matplotlib.
    """
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Overlay each cell's value; use white text on dark cells for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()

# + cell id="evvbuLzplcEV" colab_type="code"
# Plot non-normalized confusion matrix
plt.figure(figsize=(14, 8))
plot_confusion_matrix(cnf_matrix, classes=class_ids,
                      title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plt.figure(figsize=(14, 8))
plot_confusion_matrix(cnf_matrix, classes=class_ids, normalize=True,
                      title='Normalized confusion matrix')

plt.show()
VICS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # library # + # # %%capture # # # !pip install -q nnAudio # # !pip install -q --upgrade wandb # # !pip install -q grad-cam # # # !pip install -q ttach # # # !pip install efficientnet_pytorch # # # !pip install albumentations # # !pip install line_profiler # # !pip install transformers # # !pip install audiomentations # # !pip3 install pydub # + # # !pip install "ipykernel<6" # # !pip install "jupyterlab<3.1" # + import os import gc import json import random from datetime import datetime import time import collections import itertools from itertools import chain, combinations import sys import json import wandb import h5py from glob import glob import pickle import scipy as sp import numpy as np np.set_printoptions(precision=5, suppress=True) import pandas as pd import matplotlib.pyplot as plt # import seaborn as sns from sklearn.metrics import roc_auc_score from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold import IPython.display from tqdm.auto import tqdm from skimage.transform import resize import torch from torch import nn from torch.utils.data import DataLoader, Dataset from torch.nn import functional as torch_functional from torch.optim import Adam, SGD, AdamW from torch.optim.lr_scheduler import (CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau,_LRScheduler,CyclicLR) from torch.cuda.amp import autocast, GradScaler from transformers import get_cosine_schedule_with_warmup import audiomentations as A from audiomentations import Compose, AddGaussianNoise, TimeStretch, PitchShift, Shift, PolarityInversion # - # %load_ext line_profiler # # Configuration # + class Config: #frequently changed model_name = 'TCNN' model_version = "main_35th_GeM_vflip_shuffle01_5fold" use_pretrain = False 
use_pseudo_label = False debug = False use_checkpoint = False use_lr_finder = False use_subset = False subset_frac = 0.4 #preproc related #augmentation vflip = True time_shift = False time_stretch = False divide_std = False#std changed... tbs shuffle_channels = False #need normalization first add_gaussian_noise = False #need normalization first shuffle01 = True timemask = False shift_channel = False pitch_shift = False use_mixup = False mixup_alpha = 0.1 cropping = False #logistic seed = 48 target_size = 1 target_col = 'target' n_fold = 5 # gdrive = './drive/MyDrive/Kaggle/G2Net/input/' kaggle_json_path = 'kaggle/kaggle.json' output_dir = "G2Net-Model/" #logger print_num_steps=350 #training related train_folds = [0,1,2,3,4] epochs = 12 batch_size = 256 lr= 5e-3#5e-3 # Optimizer weight_decay=0 #1e-4 # Optimizer, default value 0.01 gradient_accumulation_steps=1 # Optimizer scheduler='cosineWithWarmUp' # warm up ratio 0.1 of total steps #speedup num_workers=0 non_blocking=True amp=True use_cudnn = True use_tpu = False #CNN structure channels = 32 reduction = 1.0 # no need to change below Config.model_output_folder = Config.output_dir + Config.model_version + "/" if not os.path.exists(Config.output_dir): os.mkdir(Config.output_dir) if not os.path.exists(Config.model_output_folder): os.mkdir(Config.model_output_folder) torch.backends.cudnn.benchmark = Config.use_cudnn display(Config.model_output_folder) # - # # wandb # + def save_object(obj, filename): with open(filename, 'wb') as outp: # Overwrites any existing file. 
pickle.dump(obj, outp, pickle.HIGHEST_PROTOCOL) def class2dict(f): return dict((name, getattr(f, name)) for name in dir(f) if not name.startswith('__')) save_object(class2dict(Config), Config.model_output_folder + "Config.pkl") # - # # Data path def id_2_path(file_id: str, train=True) -> str: if train: return "./output/whiten-train/{}.npy".format(file_id) else: return "./output/whiten-test/{}.npy".format(file_id) train_df = pd.read_csv('training_labels.csv') test_df = pd.read_csv('sample_submission.csv') if Config.debug: Config.epochs = 1 train_df = train_df.sample(n=50000, random_state=Config.seed).reset_index(drop=True) if Config.use_subset: train_df = train_df.sample(frac=Config.subset_frac, random_state=Config.seed).reset_index(drop=True) train_df['file_path'] = train_df['id'].apply(lambda x :id_2_path(x)) test_df['file_path'] = test_df['id'].apply(lambda x :id_2_path(x,False)) # checking magnitude of waves num_files = 5 input_file_paths = train_df['file_path'].values[:num_files] batch_waves=np.zeros((num_files,3,4096)) for i,input_file_path in enumerate(input_file_paths[:num_files]): file_name = input_file_path.split('/')[-1].split('.npy')[0] waves = np.load(input_file_path)#.astype(np.float32) # (3, 4096) # batch_waves[i,:] = np.array([waves.max(),np.abs(waves).max(),np.abs(waves).min()]) whitened_waves = waves#whiten(waves) print(whitened_waves[2][16]) # + # !! 
skf = StratifiedKFold(n_splits=Config.n_fold, shuffle=True, random_state=Config.seed) splits = skf.split(train_df, train_df["target"]) train_df['fold'] = -1 for fold, (train_index, valid_index) in enumerate(splits): train_df.loc[valid_index,"fold"] = fold train_df['fold_orig'] = train_df['fold'] train_df.groupby('fold')['target'].apply(lambda s: s.value_counts(normalize=True)) # - train_df # # Pseudo Labeling # + ## Decides Threshold based on previous oof pred distribution # need to work on the copy, tbs # if Config.use_pseudo_label: # print("Load Checkpoint, epo") # checkpoint = torch.load(Config.output_dir + 'SE_reduction1_SiLU/Fold_0_best_model.pth') # valid_preds = checkpoint['valid_preds'] # kf = StratifiedKFold(n_splits=Config.n_fold, shuffle=True, random_state=Config.seed) # train_df["fold"] = -1 # for fold, (train_index, valid_index) in enumerate(kf.split(train_df, train_df["target"])): # train_df.loc[valid_index,"fold"] = fold # # sanity check # def get_score(y_true, y_pred): # score = roc_auc_score(y_true, y_pred) # return score # y_true = train_df.query("fold == 0")["target"] # train_df.drop(columns=["fold"],inplace=True) # print(get_score(y_true, valid_preds)) # # get threshold # up_thresh = 0.9 # down_thresh = 0.1 # for up_thresh in range(6, 10): # up_thresh /= 10 # for down_thresh in range(4,0,-1): # down_thresh /= 10 # ratio_up = sum(valid_preds > up_thresh) / len(valid_preds) # ratio_down = sum(valid_preds < down_thresh) / len(valid_preds) # acc_up = np.mean(y_true[ valid_preds > up_thresh]) # acc_down = 1 - np.mean(y_true[ valid_preds < down_thresh]) # print("Under Threshold Up : {:.0%}, Down: {:.0%}".format(up_thresh, down_thresh)) # print("We can have Up sample: {:.1%}, Down sample: {:.1%}".format(ratio_up, ratio_down)) # print("Up Accuracy: {:.1%}, Down Accuracy: {:.1%}".format(acc_up, acc_down)) # print() # - # ## Add pseudo label # + if Config.use_pseudo_label: up_thresh = 0.9 down_thresh = 0.2 pseudo_label_df = pd.read_csv(Config.gdrive + 
"pseudo_label_2.csv") pseudo_label_df.head() num_test = pseudo_label_df.shape[0] num_yes = (pseudo_label_df["target"] >= up_thresh).sum() num_no = (pseudo_label_df["target"] <= down_thresh).sum() num_all = num_yes+num_no print("{:.2%} ratio, {:.2%} 1, {:.2%} 0".format(num_all/num_test, num_yes/num_test, num_no/num_test)) test_df_2 = pseudo_label_df[(pseudo_label_df["target"] >= up_thresh) | (pseudo_label_df["target"] <= down_thresh)].copy() test_df_2["target"] = (test_df_2["target"] > up_thresh).astype(int) test_df_2 = test_df_2.merge(test_df[["id","file_path"]],on="id",how="left") kf = StratifiedKFold(n_splits=Config.n_fold, shuffle=True, random_state=Config.seed) test_df_2['fold'] = -1 for fold, (train_index, valid_index) in enumerate(kf.split(test_df_2, test_df_2["target"])): test_df_2.loc[valid_index,"fold"] = fold train_df = pd.concat([train_df, test_df_2]).reset_index(drop=True) display(train_df.groupby('fold')['target'].apply(lambda s: s.value_counts(normalize=True))) train_df.reset_index(inplace=True, drop=True) display(train_df.shape) train_df # - # # Model # ## dataset # + transform_list = [] if Config.add_gaussian_noise: transform_list.append(A.AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5)) if Config.time_shift: transform_list.append(A.Shift(min_fraction=-512*1.0/4096, max_fraction=-1*1.0/4096, p=0.5,rollover=False))#<0 means shift towards left, fraction of total sound length # if Config.shift_channel: # transform_list.append() if Config.pitch_shift: transform_list.append(A.PitchShift(min_semitones=-1, max_semitones=1, p=0.5)) if Config.time_stretch: transform_list.append(A.TimeStretch(min_rate=0.98, max_rate=1.02,leave_length_unchanged=True, p=0.5)) if Config.timemask: transform_list.append(A.TimeMask(min_band_part=0.0, max_band_part=0.01, fade=False, p=0.5))#try 0.03 next time # if Config.vflip: # transform_list.append(A.PolarityInversion(p=0.5)) train_transform = A.Compose(transform_list) # test_transform = A.Compose([]) class 
DataRetriever(Dataset): def __init__(self, paths, targets, transforms=None): self.paths = paths self.targets = targets self.transforms = transforms # self.ta_augment = ta_Compose([ # ta_ShuffleChannels(), # ])#bad coding style start_time =time.time() array_shape = (len(self.paths),3,4096) self.data = np.zeros(array_shape,dtype=np.float32) for i,path in enumerate(self.paths): waves = np.load(path) self.data[i,:] = waves print(time.time()-start_time) def __len__(self): return len(self.paths) def __getitem__(self, index): # path = self.paths[index] # waves = np.load(path) if Config.cropping: waves = self.data[index][:,1792:3840+1] else: waves = self.data[index] if Config.divide_std: waves[0] *= 0.03058 waves[1] *= 0.03058 waves[2] *= 0.03096 if Config.shuffle_channels: if np.random.random()<0.5: np.random.shuffle(waves) if Config.shuffle01: if np.random.random()<0.5: waves[[0,1]]=waves[[1,0]] if Config.vflip: if np.random.random()<0.5: waves = -waves if self.transforms is not None: waves= self.transforms(waves,sample_rate=2048) waves = torch.from_numpy(waves) # if Config.ta:#on tensor, batch*channel*ts # waves = self.ta_augment(waves,sample_rate=2048) target = torch.tensor(self.targets[index],dtype=torch.float)#device=device, return (waves, target) class DataRetrieverTest(Dataset): def __init__(self, paths, targets, transforms=None): self.paths = paths self.targets = targets self.transforms = transforms array_shape = (len(self.paths),3,4096) self.data = np.zeros(array_shape,dtype=np.float32) for i,path in enumerate(self.paths): waves = np.load(path) self.data[i,:] = waves def __len__(self): return len(self.paths) def __getitem__(self, index): # path = self.paths[index] # waves = np.load(path) waves = self.data[index] if Config.divide_std: waves[0] *= 0.03058 waves[1] *= 0.03058 waves[2] *= 0.03096 if self.transforms is not None: waves= self.transforms(waves,sample_rate=2048) waves = torch.from_numpy(waves) target = 
torch.tensor(self.targets[index],dtype=torch.float)#device=device, return (waves, target) class DataRetrieverLRFinder(Dataset): def __init__(self, paths, targets, transforms=None): self.paths = paths self.targets = targets self.transforms = transforms # self.ta_augment = ta_Compose([ # ta_ShuffleChannels(), # ])#bad coding style # start_time =time.time() # array_shape = (len(self.paths),3,4096) # self.data = np.zeros(array_shape,dtype=np.float32) # for i,path in enumerate(self.paths): # waves = np.load(path) # self.data[i,:] = waves # print(time.time()-start_time) def __len__(self): return len(self.paths) def __getitem__(self, index): path = self.paths[index] waves = np.load(path) # waves = self.data[index] if Config.divide_std: waves[0] *= 0.03058 waves[1] *= 0.03058 waves[2] *= 0.03096 if Config.shuffle_channels: if np.random.random()<0.5: np.random.shuffle(waves) if Config.shuffle01: if np.random.random()<0.5: waves[[0,1]]=waves[[1,0]] if Config.vflip: if np.random.random()<0.5: waves = -waves if self.transforms is not None: waves= self.transforms(waves,sample_rate=2048) waves = torch.from_numpy(waves) # if Config.ta:#on tensor, batch*channel*ts # waves = self.ta_augment(waves,sample_rate=2048) target = torch.tensor(self.targets[index],dtype=torch.float)#device=device, return (waves, target) # - class GeM(nn.Module): ''' Code modified from the 2d code in https://amaarora.github.io/2020/08/30/gempool.html ''' def __init__(self, kernel_size=8, p=3, eps=1e-6): super(GeM,self).__init__() self.p = nn.Parameter(torch.ones(1)*p) self.kernel_size = kernel_size self.eps = eps def forward(self, x): return self.gem(x, p=self.p, eps=self.eps) def gem(self, x, p=3, eps=1e-6): return torch_functional.avg_pool1d(x.clamp(min=eps).pow(p), self.kernel_size).pow(1./p) def __repr__(self): return self.__class__.__name__ + \ '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + \ ', ' + 'eps=' + str(self.eps) + ')' # ## neural net # + #for 
SE----------------------------------------------------------------------- class SELayer(nn.Module): def __init__(self, channel, reduction): super(SELayer, self).__init__() self.avg_pool = nn.AdaptiveAvgPool1d(1) self.fc = nn.Sequential( nn.Linear(channel, int(channel // reduction), bias=False), nn.SiLU(inplace=True), nn.Linear(int(channel // reduction), channel, bias=False), nn.Sigmoid() ) def forward(self, x): b, c, _ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1) return x * y.expand_as(x) class SEBasicBlock(nn.Module): def __init__(self, in_channels, out_channels, reduction,downsample=True): super(SEBasicBlock, self).__init__() if downsample: self.residual_function = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), nn.SiLU(inplace=True), nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), SELayer(out_channels, reduction), nn.MaxPool1d(2,ceil_mode=True), # downsampling by 2 ) # self.shortcut = nn.Sequential( # nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), # nn.BatchNorm1d(out_channels), # nn.MaxPool1d(2,ceil_mode=True), # downsampling by 2 # )#skip layers in residual_function, can try simple MaxPool1d self.shortcut = nn.Sequential( nn.MaxPool1d(2,ceil_mode=True), # downsampling by 2 ) else: self.residual_function = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), nn.SiLU(inplace=True), nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), SELayer(out_channels, reduction), ) # self.shortcut = nn.Sequential( # nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), # nn.BatchNorm1d(out_channels), # )#skip layers in residual_function, can try identity, i.e., nn.Sequential() self.shortcut = nn.Sequential() def forward(self, x): return 
nn.SiLU(inplace=True)(self.residual_function(x) + self.shortcut(x)) #------------------------------------------------------------------------------- class ResNet(nn.Module): def __init__(self, block, num_block): super().__init__() self.in_channels = Config.channels self.conv1 = nn.Sequential( nn.Conv1d(3, Config.channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(Config.channels), nn.SiLU(inplace=True)) self.conv2_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[0]) self.conv3_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[1]) self.conv4_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[2]) self.conv5_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[3]) self.conv6_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[4]) self.conv7_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[5]) self.conv8_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[6]) self.conv9_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[7]) self.conv10_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[8]) self.head = nn.Sequential(nn.Flatten(), # 8*channels nn.Linear(8*Config.channels, 64), nn.SiLU(inplace=True), nn.Dropout(p=.25),#after activation nn.Linear(64, 1), ) def _make_stage(self, block, out_channels, num_blocks): """one stage may contain more than one residual block Args: block: block type, basic block, bottle neck block, SE-type block etc out_channels: output depth channel number of this layer num_blocks: how many blocks for this stage Return: return a resnet stage """ downsample = [True] + [False] * (num_blocks - 1) layers = [] for i in range(num_blocks): layers.append(block(self.in_channels, out_channels,reduction=Config.reduction, downsample=downsample[i])) self.in_channels = out_channels return 
nn.Sequential(*layers) def forward(self, x): output = self.conv1(x) output = self.conv2_x(output) output = self.conv3_x(output) output = self.conv4_x(output) output = self.conv5_x(output) output = self.conv6_x(output) output = self.conv7_x(output) output = self.conv8_x(output) output = self.conv9_x(output) output = self.conv10_x(output) output = self.head(output) return output #--------------------------------------------------------------------------------------------------- class StochasticDepthBasicBlock(nn.Module): def __init__(self, in_channels, out_channels, reduction,downsample,p,is_train=True):#tbs for test data super(StochasticDepthBasicBlock, self).__init__() self.p = p self.is_train = is_train if downsample: self.residual_function = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), nn.SiLU(inplace=True), nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), SELayer(out_channels, reduction), nn.MaxPool1d(2,ceil_mode=True), # downsampling by 2 ) self.shortcut = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), nn.MaxPool1d(2,ceil_mode=True), # downsampling by 2 )#skip layers in residual_function, can try simple MaxPool1d else: self.residual_function = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), nn.SiLU(inplace=True), nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), SELayer(out_channels, reduction), ) self.shortcut = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(out_channels), )#skip layers in residual_function, can try identity, i.e., nn.Sequential() def survival(self): var = torch.bernoulli(torch.tensor(self.p).float()) return 
torch.equal(var,torch.tensor(1).float().to(var.device)) def forward(self, x): if self.is_train: if self.survival(): x = nn.SiLU(inplace=True)(self.residual_function(x) + self.shortcut(x)) else: x = self.shortcut(x) else: x = self.residual_function(x)*self.p+self.shortcut(x) #what's self.p right now? print("p",self.p) return x class StochasticDepthResNet(nn.Module): def __init__(self, block, num_block): super().__init__() self.in_channels = Config.channels self.conv1 = nn.Sequential( nn.Conv1d(3, Config.channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm1d(Config.channels), nn.SiLU(inplace=True)) self.step = (1-0.8)/(sum(num_block)-1) self.pl = 1 self.conv2_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[0]) self.conv3_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[1]) self.conv4_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[2]) self.conv5_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[3]) self.conv6_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[4]) self.conv7_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[5]) self.conv8_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[6]) self.conv9_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[7]) self.conv10_x = self._make_stage(block, out_channels=Config.channels, num_blocks=num_block[8]) self.head = nn.Sequential(nn.Flatten(), # 8*channels nn.Linear(8*Config.channels, 64), nn.SiLU(inplace=True), nn.Dropout(p=.25),#after activation nn.Linear(64, 1), ) def _make_stage(self, block, out_channels, num_blocks): """one stage may contain more than one residual block Args: block: block type, basic block, bottle neck block, SE-type block etc out_channels: output depth channel number of this layer num_blocks: how many blocks for this stage Return: return a resnet stage """ 
        # Only the first block of each stage downsamples (halves the time axis).
        downsample = [True] + [False] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            # p=self.pl is the survival probability for stochastic depth; it decays
            # linearly by self.step per block (set in __init__) from 1.0 downward.
            layers.append(block(self.in_channels, out_channels,reduction=Config.reduction,downsample=downsample[i],p=self.pl))
            self.in_channels = out_channels
            self.pl -= self.step
        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem followed by nine sequential stages, then the classification head.
        output = self.conv1(x)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        output = self.conv4_x(output)
        output = self.conv5_x(output)
        output = self.conv6_x(output)
        output = self.conv7_x(output)
        output = self.conv8_x(output)
        output = self.conv9_x(output)
        output = self.conv10_x(output)
        output = self.head(output)
        return output


class ModelCNN_Dilations(nn.Module):
    """1D convolutional neural network with dilations.

    Classifier of the gravitational waves.
    Inspired by the https://arxiv.org/pdf/1904.08693.pdf
    """

    def __init__(self):
        super().__init__()
        # 1x1 conv lifts the 3 detector channels to 256 feature channels.
        self.init_conv = nn.Sequential(nn.Conv1d(3, 256, kernel_size=1), nn.ReLU())
        # Stack of 11 dilated convs with exponentially growing dilation (1..1024),
        # giving an exponentially growing receptive field over the 4096-sample wave.
        self.convs = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv1d(256, 256, kernel_size=2, dilation=2 ** i),
                    nn.ReLU(),
                )
                for i in range(11)
            ]
        )
        self.out_conv = nn.Sequential(nn.Conv1d(256, 1, kernel_size=1), nn.ReLU())
        # 2049 = remaining temporal length after the dilated stack — TODO confirm.
        self.fc = nn.Linear(2049, 1)

    def forward(self, x):
        x = self.init_conv(x)
        for conv in self.convs:
            x = conv(x)
        x = self.out_conv(x)
        x = self.fc(x)
        # Drop the singleton channel dim in place -> (batch,) logits.
        x.squeeze_(1)
        return x


class Model1DCNN(nn.Module):
    """1D convolutional neural network. Classifier of the gravitational waves.
    Architecture from there https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.120.141103
    """

    def __init__(self, initial_channnels=8):
        super().__init__()
        # Six conv blocks; channel width doubles twice, MaxPool shrinks time axis.
        self.cnn1 = nn.Sequential(
            nn.Conv1d(3, initial_channnels, kernel_size=64),
            nn.BatchNorm1d(initial_channnels),
            nn.ELU(),
        )
        self.cnn2 = nn.Sequential(
            nn.Conv1d(initial_channnels, initial_channnels, kernel_size=32),
            nn.MaxPool1d(kernel_size=8),
            nn.BatchNorm1d(initial_channnels),
            nn.ELU(),
        )
        self.cnn3 = nn.Sequential(
            nn.Conv1d(initial_channnels, initial_channnels * 2, kernel_size=32),
            nn.BatchNorm1d(initial_channnels * 2),
            nn.ELU(),
        )
        self.cnn4 = nn.Sequential(
            nn.Conv1d(initial_channnels * 2, initial_channnels * 2, kernel_size=16),
            nn.MaxPool1d(kernel_size=6),
            nn.BatchNorm1d(initial_channnels * 2),
            nn.ELU(),
        )
        self.cnn5 = nn.Sequential(
            nn.Conv1d(initial_channnels * 2, initial_channnels * 4, kernel_size=16),
            nn.BatchNorm1d(initial_channnels * 4),
            nn.ELU(),
        )
        self.cnn6 = nn.Sequential(
            nn.Conv1d(initial_channnels * 4, initial_channnels * 4, kernel_size=16),
            nn.MaxPool1d(kernel_size=4),
            nn.BatchNorm1d(initial_channnels * 4),
            nn.ELU(),
        )
        if Config.cropping:
            # NOTE(review): `tbd` is not defined anywhere visible — this branch
            # raises NameError when Config.cropping is True; the cropped
            # feature-map length still needs to be computed.
            fm_size = tbd
        else:
            # 11 = temporal length after the conv/pool chain on a 4096-sample input.
            fm_size = 11
        self.fc1 = nn.Sequential(
            nn.Linear(initial_channnels * 4 * fm_size, 64),
            nn.BatchNorm1d(64),
            nn.Dropout(0.5),
            nn.ELU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(64, 64),
            nn.BatchNorm1d(64),
            nn.Dropout(0.5),
            nn.ELU(),
        )
        self.fc3 = nn.Sequential(
            nn.Linear(64, 1),
        )

    def forward(self, x):
        x = self.cnn1(x)
        x = self.cnn2(x)
        x = self.cnn3(x)
        x = self.cnn4(x)
        x = self.cnn5(x)
        x = self.cnn6(x)
        # print(x.shape)
        x = x.flatten(1)
        # x = x.mean(-1)
        # x = torch.cat([x.mean(-1), x.max(-1)[0]])
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x


class Model1DCNNGEM(nn.Module):
    """1D convolutional neural network. Classifier of the gravitational waves.
Architecture from there https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.120.141103 """ def __init__(self, initial_channnels=8): super().__init__() self.cnn1 = nn.Sequential( nn.Conv1d(3, initial_channnels, kernel_size=64), nn.BatchNorm1d(initial_channnels), nn.ELU(), ) self.cnn2 = nn.Sequential( nn.Conv1d(initial_channnels, initial_channnels, kernel_size=32), GeM(kernel_size=8), nn.BatchNorm1d(initial_channnels), nn.ELU(), ) self.cnn3 = nn.Sequential( nn.Conv1d(initial_channnels, initial_channnels * 2, kernel_size=32), nn.BatchNorm1d(initial_channnels * 2), nn.ELU(), ) self.cnn4 = nn.Sequential( nn.Conv1d(initial_channnels * 2, initial_channnels * 2, kernel_size=16), GeM(kernel_size=6), nn.BatchNorm1d(initial_channnels * 2), nn.ELU(), ) self.cnn5 = nn.Sequential( nn.Conv1d(initial_channnels * 2, initial_channnels * 4, kernel_size=16), nn.BatchNorm1d(initial_channnels * 4), nn.ELU(), ) self.cnn6 = nn.Sequential( nn.Conv1d(initial_channnels * 4, initial_channnels * 4, kernel_size=16), GeM(kernel_size=4), nn.BatchNorm1d(initial_channnels * 4), nn.ELU(), ) if Config.cropping: fm_size = tbd else: fm_size = 11 self.fc1 = nn.Sequential( nn.Linear(initial_channnels * 4 * fm_size, 64), nn.BatchNorm1d(64), nn.Dropout(0.5), nn.ELU(), ) self.fc2 = nn.Sequential( nn.Linear(64, 64), nn.BatchNorm1d(64), nn.Dropout(0.5), nn.ELU(), ) self.fc3 = nn.Sequential( nn.Linear(64, 1), ) def forward(self, x): x = self.cnn1(x) x = self.cnn2(x) x = self.cnn3(x) x = self.cnn4(x) x = self.cnn5(x) x = self.cnn6(x) # print(x.shape) x = x.flatten(1) # x = x.mean(-1) # x = torch.cat([x.mean(-1), x.max(-1)[0]]) x = self.fc1(x) x = self.fc2(x) x = self.fc3(x) return x # - # def Model(): # """ return a skip connection network # """ # return ResNet(SEBasicBlock, [1,1,1,1,1,1,1,1,1]) # def Model(): # """ return a StochasticDepthResNet network # """ # return StochasticDepthResNet(StochasticDepthBasicBlock, [1,2,1,1,1,1,1,1,1]) # def Model(): # return Model1DCNN(Config.channels) def Model(): return 
Model1DCNNGEM(Config.channels) def get_n_params(model): pp=0 for p in list(model.parameters()): nn=1 for s in list(p.size()): nn = nn*s pp += nn return pp model = Model()#can possibly call random get_n_params(model) # ## Util # + def get_score(y_true, y_pred): score = roc_auc_score(y_true, y_pred) return score def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_torch(seed=Config.seed) def get_scheduler(optimizer, train_size): if Config.scheduler=='ReduceLROnPlateau': scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=Config.factor, patience=Config.patience, verbose=True, eps=Config.eps) elif Config.scheduler=='CosineAnnealingLR': scheduler = CosineAnnealingLR(optimizer, T_max=Config.T_max, eta_min=Config.min_lr, last_epoch=-1) elif Config.scheduler=='CosineAnnealingWarmRestarts': scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=Config.T_0, T_mult=1, eta_min=Config.min_lr, last_epoch=-1) elif Config.scheduler=='CyclicLR': iter_per_ep = train_size/Config.batch_size step_size_up = int(iter_per_ep*Config.step_up_epochs) step_size_down=int(iter_per_ep*Config.step_down_epochs) scheduler = CyclicLR(optimizer, base_lr=Config.base_lr, max_lr=Config.max_lr, step_size_up=step_size_up, step_size_down=step_size_down, mode=Config.mode, gamma=Config.cycle_decay**(1/(step_size_up+step_size_down)), cycle_momentum=False) elif Config.scheduler == 'cosineWithWarmUp': epoch_step = train_size/Config.batch_size num_warmup_steps = int(0.1 * epoch_step * Config.epochs) num_training_steps = int(epoch_step * Config.epochs) scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) return scheduler def mixed_criterion(loss_fn, pred, y_a, y_b, lam): return lam * loss_fn(pred, y_a) + (1 - lam) * loss_fn(pred, y_b) def mixup_data(x, y, alpha=1.0): """Returns 
mixed inputs, pairs of targets, and lambda""" lam = np.random.beta(alpha, alpha) batch_size = x.size()[0] index = torch.randperm(batch_size, requires_grad=False).to(x.device,non_blocking=Config.non_blocking) mixed_x = lam * x + (1 - lam) * x[index, :] y_a, y_b = y, y[index] return mixed_x, y_a, y_b, lam # + # setting device on GPU if available, else CPU if Config.use_tpu: device = xm.xla_device() else: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # device = torch.device('cpu')#for debug, tb see print('Using device:', device) print() #Additional Info when using cuda # watch nvidia-smi if device.type == 'cuda': print(torch.cuda.get_device_name(0)) print('Memory Usage:') print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB') print('Reserved: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB') # - # ## LR Finder # + class LRFinder: def __init__(self, model, optimizer, criterion, device): self.optimizer = optimizer self.model = model self.criterion = criterion self.device = device torch.save(model.state_dict(), f'{Config.model_output_folder}/init_params.pt') def range_test(self, loader, end_lr = 10, num_iter = 100, smooth_f = 0.05, diverge_th = 5): lrs = [] losses = [] best_loss = float('inf') lr_scheduler = ExponentialLR(self.optimizer, end_lr, num_iter) for step, batch in enumerate(loader): if step == num_iter: break loss = self._train_batch(batch) lrs.append(lr_scheduler.get_last_lr()[0]) #update lr lr_scheduler.step() if step > 0: loss = smooth_f * loss + (1 - smooth_f) * losses[-1] if loss < best_loss: best_loss = loss losses.append(loss) if loss > diverge_th * best_loss: print("Stopping early, the loss has diverged") break #reset model to initial parameters model.load_state_dict(torch.load(f'{Config.model_output_folder}/init_params.pt')) return lrs, losses def _train_batch(self, batch): self.model.train() self.optimizer.zero_grad() scaler = GradScaler() X = batch[0].to(self.device,non_blocking=Config.non_blocking) 
targets = batch[1].to(self.device,non_blocking=Config.non_blocking) if Config.use_mixup: (X_mix, targets_a, targets_b, lam) = mixup_data( X, targets, Config.mixup_alpha ) with autocast(): outputs = self.model(X_mix).squeeze() loss = mixed_criterion(self.criterion, outputs, targets_a, targets_b, lam) else: with autocast(): outputs = self.model(X).squeeze() loss = self.criterion(outputs, targets) #loss.backward() scaler.scale(loss).backward() if Config.use_tpu: xm.optimizer_step(self.optimizer, barrier=True) # Note: TPU-specific code! else: scaler.step(self.optimizer) scaler.update() # self.optimizer.step() return loss.item() class ExponentialLR(_LRScheduler): def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1): self.end_lr = end_lr self.num_iter = num_iter super(ExponentialLR, self).__init__(optimizer, last_epoch) def get_lr(self): curr_iter = self.last_epoch r = curr_iter / self.num_iter return [base_lr * (self.end_lr / base_lr) ** r for base_lr in self.base_lrs] def plot_lr_finder(lrs, losses, skip_start = 0, skip_end = 0): if skip_end == 0: lrs = lrs[skip_start:] losses = losses[skip_start:] else: lrs = lrs[skip_start:-skip_end] losses = losses[skip_start:-skip_end] fig = plt.figure(figsize = (16,8)) ax = fig.add_subplot(1,1,1) ax.plot(lrs, losses) ax.set_xscale('log') ax.set_xlabel('Learning rate') ax.set_ylabel('Loss') ax.grid(True, 'both', 'x') plt.show() # - if Config.use_lr_finder: START_LR = 1e-7 model = Model() model.to(device,non_blocking=Config.non_blocking) optimizer = AdamW(model.parameters(), lr=START_LR, weight_decay=Config.weight_decay, amsgrad=False) criterion = torch_functional.binary_cross_entropy_with_logits train_data_retriever = DataRetrieverLRFinder(train_df['file_path'], train_df["target"].values) train_loader = DataLoader(train_data_retriever, batch_size=Config.batch_size, shuffle=True, num_workers=Config.num_workers, pin_memory=True, drop_last=True) # %%time if Config.use_lr_finder: try: END_LR = 10 NUM_ITER = 200 lr_finder = 
LRFinder(model, optimizer, criterion, device) lrs, losses = lr_finder.range_test(train_loader, END_LR, NUM_ITER) except RuntimeError as e: del model, optimizer, criterion, train_data_retriever, train_loader, lr_finder gc.collect() torch.cuda.empty_cache() print(e) if Config.use_lr_finder: plot_lr_finder(lrs[:-28], losses[:-28]) # ## Trainer # + class Trainer: def __init__( self, model, device, optimizer, criterion, scheduler, valid_labels, best_valid_score, fold, ): self.model = model self.device = device self.optimizer = optimizer self.criterion = criterion self.scheduler = scheduler self.best_valid_score = best_valid_score self.valid_labels = valid_labels self.fold = fold def fit(self, epochs, train_loader, valid_loader, save_path): train_losses = [] valid_losses = [] for n_epoch in range(epochs): start_time = time.time() print('Epoch: ', n_epoch) train_loss, train_preds = self.train_epoch(train_loader) valid_loss, valid_preds = self.valid_epoch(valid_loader) train_losses.append(train_loss) valid_losses.append(valid_loss) if isinstance(self.scheduler, ReduceLROnPlateau): self.scheduler.step(valid_loss) valid_score = get_score(self.valid_labels, valid_preds) numbers = valid_score filename = Config.model_output_folder+f'score_epoch_{n_epoch}.json' with open(filename, 'w') as file_object: json.dump(numbers, file_object) if self.best_valid_score < valid_score: self.best_valid_score = valid_score self.save_model(n_epoch, save_path+f'best_model.pth', train_preds, valid_preds) print('train_loss: ',train_loss) print('valid_loss: ',valid_loss) print('valid_score: ',valid_score) print('best_valid_score: ',self.best_valid_score) print('time used: ', time.time()-start_time) wandb.log({f"[fold{self.fold}] epoch": n_epoch+1, f"[fold{self.fold}] avg_train_loss": train_loss, f"[fold{self.fold}] avg_val_loss": valid_loss, f"[fold{self.fold}] val_score": valid_score}) # fig,ax = plt.subplots(1,1,figsize=(15,7)) # ax.plot(list(range(epochs)), train_losses, label="train_loss") # 
ax.plot(list(range(epochs)), valid_losses, label="val_loss") # fig.legend() # plt.show() def train_epoch(self, train_loader): if Config.amp: scaler = GradScaler() self.model.train() losses = [] train_loss = 0 # preds = [] for step, batch in enumerate(train_loader, 1): self.optimizer.zero_grad() X = batch[0].to(self.device,non_blocking=Config.non_blocking) targets = batch[1].to(self.device,non_blocking=Config.non_blocking) if Config.use_mixup: (X_mix, targets_a, targets_b, lam) = mixup_data( X, targets, Config.mixup_alpha ) with autocast(): outputs = self.model(X_mix).squeeze() loss = mixed_criterion(self.criterion, outputs, targets_a, targets_b, lam) else: with autocast(): outputs = self.model(X).squeeze() loss = self.criterion(outputs, targets) if Config.gradient_accumulation_steps > 1: loss = loss / Config.gradient_accumulation_steps scaler.scale(loss).backward() if (step) % Config.gradient_accumulation_steps == 0: scaler.step(self.optimizer) scaler.update() if (not isinstance(self.scheduler, ReduceLROnPlateau)): self.scheduler.step() # preds.append(outputs.sigmoid().to('cpu').detach().numpy()) loss2 = loss.detach() wandb.log({f"[fold{self.fold}] loss": loss2, f"[fold{self.fold}] lr": self.scheduler.get_last_lr()[0]}) # losses.append(loss2.item()) losses.append(loss2) train_loss += loss2 if (step) % Config.print_num_steps == 0: train_loss = train_loss.item() #synch once per print_num_steps instead of once per batch print(f'[{step}/{len(train_loader)}] ', f'avg loss: ',train_loss/step, f'inst loss: ', loss2.item()) # predictions = np.concatenate(preds) # losses_avg = [] # for i, loss in enumerate(losses): # if i == 0 : # losses_avg.append(loss) # else: # losses_avg.append(losses_avg[-1] * 0.6 + loss * 0.4) # losses = torch.stack(losses) # losses_avg = torch.stack(losses_avg) # fig,ax = plt.subplots(1,1,figsize=(15,7)) # ax.plot(list(range(step)), losses, label="train_loss per step") # ax.plot(list(range(step)), losses_avg, label="train_loss_avg per step") # 
fig.legend() # plt.show() return train_loss / step, None#, predictions def valid_epoch(self, valid_loader): self.model.eval() valid_loss = [] preds = [] for step, batch in enumerate(valid_loader, 1): with torch.no_grad(): X = batch[0].to(self.device,non_blocking=Config.non_blocking) targets = batch[1].to(self.device,non_blocking=Config.non_blocking) outputs = self.model(X).squeeze() loss = self.criterion(outputs, targets) if Config.gradient_accumulation_steps > 1: loss = loss / Config.gradient_accumulation_steps valid_loss.append(loss.detach().item()) preds.append(outputs.sigmoid().to('cpu').numpy()) # valid_loss.append(loss.detach())#.item()) # preds.append(outputs.sigmoid())#.to('cpu').numpy()) # valid_loss = torch.cat(valid_loss).to('cpu').numpy() # predictions = torch.cat(preds).to('cpu').numpy() predictions = np.concatenate(preds) return np.mean(valid_loss), predictions def save_model(self, n_epoch, save_path, train_preds, valid_preds): torch.save( { "model_state_dict": self.model.state_dict(), "optimizer_state_dict": self.optimizer.state_dict(), "best_valid_score": self.best_valid_score, "n_epoch": n_epoch, 'scheduler': self.scheduler.state_dict(), 'train_preds': train_preds, 'valid_preds': valid_preds, }, save_path, ) # - # # Training loop seed_torch(seed=Config.seed) def training_loop(use_checkpoint=Config.use_checkpoint): kf = StratifiedKFold(n_splits=Config.n_fold, shuffle=True, random_state=Config.seed) avg_best_valid_score = 0 folds_val_score = [] for fold in range(Config.n_fold): train_index, valid_index = train_df.query(f"fold!={fold}").index, train_df.query(f"fold_orig=={fold}").index print('Fold: ', fold) if fold not in Config.train_folds: print("skip") continue train_X, valid_X = train_df.loc[train_index], train_df.loc[valid_index] valid_labels = train_df.loc[valid_index,Config.target_col].values # fold_indices = pd.read_csv(f'{Config.gdrive}/Fold_{fold}_indices.csv')#saved fold ids oof = pd.DataFrame() oof['id'] = train_df.loc[valid_index,'id'] 
oof['id'] = valid_X['id'].values.copy() oof = oof.reset_index() # assert oof['id'].eq(fold_indices['id']).all() # if not Config.use_subset: # assert oof['id'].eq(fold_indices['id']).sum()==112000 oof['target'] = valid_labels oof.to_csv(f'{Config.model_output_folder}/Fold_{fold}_oof_pred.csv') # continue # uncomment this is to check oof ids print('training data samples, val data samples: ', len(train_X) ,len(valid_X)) train_data_retriever = DataRetriever(train_X["file_path"].values, train_X["target"].values, transforms=train_transform)#how to run this only once and use for next experiment? valid_data_retriever = DataRetrieverTest(valid_X["file_path"].values, valid_X["target"].values, transforms=test_transform) train_loader = DataLoader(train_data_retriever, batch_size=Config.batch_size, shuffle=True, num_workers=Config.num_workers, pin_memory=True, drop_last=False) valid_loader = DataLoader(valid_data_retriever, batch_size=Config.batch_size * 2, shuffle=False, num_workers=Config.num_workers, pin_memory=True, drop_last=False) model = Model() model.to(device,non_blocking=Config.non_blocking) optimizer = AdamW(model.parameters(), lr=Config.lr, weight_decay=Config.weight_decay, amsgrad=False) scheduler = get_scheduler(optimizer, len(train_X)) best_valid_score = -np.inf if use_checkpoint: print("Load Checkpoint, epo") checkpoint = torch.load(f'{Config.model_output_folder}/Fold_{fold}_best_model.pth') model.load_state_dict(checkpoint['model_state_dict']) optimizer.load_state_dict(checkpoint['optimizer_state_dict']) best_valid_score = float(checkpoint['best_valid_score']) scheduler.load_state_dict(checkpoint['scheduler']) criterion = torch_functional.binary_cross_entropy_with_logits trainer = Trainer( model, device, optimizer, criterion, scheduler, valid_labels, best_valid_score, fold ) history = trainer.fit( epochs=Config.epochs, train_loader=train_loader, valid_loader=valid_loader, save_path=f'{Config.model_output_folder}/Fold_{fold}_', ) 
folds_val_score.append(trainer.best_valid_score) wandb.finish() print('folds score:', folds_val_score) print("Avg: {:.5f}".format(np.mean(folds_val_score))) print("Std: {:.5f}".format(np.std(folds_val_score))) # # Weight & Bias Initialization wandb.login(key="1b0833b15e81d54fad9cfbbe3d923f57562a6f89") job_type= "debug" if Config.debug else "train" # run = wandb.init(project="G2Net", name=Config.model_version, config=class2dict(Config), group=Config.model_name, job_type=job_type) run = wandb.init(project="G2Net", name=Config.model_version, config=class2dict(Config), group=Config.model_name, job_type=Config.model_version) # # Train # %%time try: # %lprun -f DataRetriever.__getitem__ -f Trainer.train_epoch -f Trainer.fit -f Trainer.valid_epoch training_loop() # training_loop(Config.use_checkpoint) except RuntimeError as e: gc.collect() torch.cuda.empty_cache() print(e)# saving oof predictions # # Pause from jarviscloud import jarviscloud jarviscloud.pause() for fold in Config.train_folds: print(fold) checkpoint = torch.load(f'{Config.model_output_folder}/Fold_{fold}_best_model.pth') # print(checkpoint['valid_preds']) try: # oof = pd.read_csv(f'{Config.gdrive}/Fold_{fold}_indices.csv') also works, used in replacement of next statement for previously not generated Fold_{fold}_oof_pred.csv oof = pd.read_csv(f'{Config.model_output_folder}/Fold_{fold}_oof_pred.csv') oof['pred'] = checkpoint['valid_preds'] oof.to_csv(f'{Config.model_output_folder}/Fold_{fold}_oof_pred.csv') print('successfully saved oof predictions for Fold: ', fold) except: raise RuntimeError('failure in saving predictions for Fold: ', fold) # # add TTA # + # dataset # - class TTA(Dataset): def __init__(self, paths, targets, use_vflip=False, shuffle_channels=False, time_shift=False, add_gaussian_noise = False, time_stretch=False,shuffle01=False ): self.paths = paths self.targets = targets self.use_vflip = use_vflip self.shuffle_channels = shuffle_channels self.time_shift = time_shift self.gaussian_noise = 
add_gaussian_noise self.time_stretch = time_stretch self.shuffle01 = shuffle01 if time_shift: self.time_shift = A.Shift(min_fraction=-512*1.0/4096, max_fraction=-1.0/4096, p=1,rollover=False) if add_gaussian_noise: self.gaussian_noise = A.AddGaussianNoise(min_amplitude=0.001, max_amplitude= 0.015, p=1) if time_stretch: self.time_stretch = A.TimeStretch(min_rate=0.9, max_rate=1.111,leave_length_unchanged=True, p=1) def __len__(self): return len(self.paths) def __getitem__(self, index): path = self.paths[index] waves = np.load(path) if Config.divide_std: waves[0] *= 0.03058 waves[1] *= 0.03058 waves[2] *= 0.03096 if self.use_vflip: waves = -waves if self.shuffle_channels: np.random.shuffle(waves) if self.time_shift: waves = self.time_shift(waves, sample_rate=2048) if self.gaussian_noise: waves = self.gaussian_noise(waves, sample_rate=2048) if self.time_stretch: waves = self.time_stretch(waves, sample_rate=2048) if self.shuffle01: waves[[0,1]] = waves[[1,0]] waves = torch.from_numpy(waves) target = torch.tensor(self.targets[index],dtype=torch.float)#device=device, return (waves, target) # + ## functions for making predictions # + def get_pred(loader,model): preds = [] for step, batch in enumerate(loader, 1): if step % Config.print_num_steps == 0: print("step {}/{}".format(step, len(loader))) with torch.no_grad(): X = batch[0].to(device,non_blocking=Config.non_blocking) outputs = model(X).squeeze() preds.append(outputs.sigmoid().to('cpu').numpy()) predictions = np.concatenate(preds) return predictions def get_tta_pred(df,model,**transforms): data_retriever = TTA(df['file_path'].values, df['target'].values, **transforms) loader = DataLoader(data_retriever, batch_size=Config.batch_size * 2, shuffle=False, num_workers=Config.num_workers, pin_memory=True, drop_last=False) return get_pred(loader,model) # + ##TTA for oof # + model = Model() oof_all = pd.DataFrame() for fold in Config.train_folds: oof = train_df.query(f"fold=={fold}").copy() oof['preds'] = 
torch.load(f'{Config.model_output_folder}/Fold_{fold}_best_model.pth')['valid_preds'] oof['file_path'] = train_df['id'].apply(lambda x :id_2_path(x)) # display(oof) checkpoint = torch.load(f'{Config.model_output_folder}/Fold_{fold}_best_model.pth') model.load_state_dict(checkpoint['model_state_dict']) model.to(device=device,non_blocking=Config.non_blocking) model.eval() oof["tta_vflip"] = get_tta_pred(oof,model,use_vflip=True) # oof["tta_shift"] = get_tta_pred(oof,model,time_shift=True) # oof["tta_vflip_shift"] = get_tta_pred(oof,model,use_vflip=True,time_shift=True) oof["tta_shuffle01"] = get_tta_pred(oof,model,shuffle01=True) oof["tta_vflip_shuffle01"] = get_tta_pred(oof,model,use_vflip=True,shuffle01=True) # oof["tta_shift_shuffle01"] = get_tta_pred(oof,model,time_shift=True,shuffle01=True) # oof["tta_vflip_shift_shuffle01"] = get_tta_pred(oof,model,use_vflip=True,time_shift=True,shuffle01=True) oof.to_csv(Config.model_output_folder + f"/oof_Fold_{fold}.csv", index=False) oof_all = pd.concat([oof_all,oof]) # + print("Original:",roc_auc_score(oof_all['target'], oof_all['preds'])) for col in oof.columns: if "tta" in col: print(col,roc_auc_score(oof_all['target'], oof_all[col])) oof_all['avg']=0 count = 0 for col in oof_all.columns: if "tta" in col or 'preds' in col: count+=1 oof_all['avg'] += oof_all[col] oof_all['avg'] /= count print("preds_tta_avg:",roc_auc_score(oof_all['target'], oof_all['avg'])) oof_all.to_csv(Config.model_output_folder + "/oof_all.csv", index=False) oof_all[['id','fold','avg']].rename(columns={'id':'id','fold':'fold','avg':'prediction'}).to_csv(Config.model_output_folder + "/oof_final.csv", index=False) # + ## TTA for test # + # %%time test_df['target'] = 0 model = Model() test_avg = test_df[['id', 'target']].copy() for fold in Config.train_folds: test_df2 = test_df.copy() checkpoint = torch.load(f'{Config.model_output_folder}/Fold_{fold}_best_model.pth') model.load_state_dict(checkpoint['model_state_dict']) 
model.to(device=device,non_blocking=Config.non_blocking) model.eval() test_df2['preds'+f'_Fold_{fold}'] = get_tta_pred(test_df2,model) test_df2["tta_vflip"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,use_vflip=True) # test_df2["tta_shift"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,time_shift=True) # test_df2["tta_vflip_shift"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,use_vflip=True,time_shift=True) test_df2["tta_shuffle01"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,shuffle01=True) test_df2["tta_vflip_shuffle01"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,use_vflip=True,shuffle01=True) # test_df2["tta_shift_shuffle01"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,time_shift=True,shuffle01=True) # test_df2["tta_vflip_shift_shuffle01"+f'_Fold_{fold}'] = get_tta_pred(test_df2,model,use_vflip=True,time_shift=True,shuffle01=True) test_df2.to_csv(Config.model_output_folder + f"/test_Fold_{fold}.csv", index=False) count = 0 for col in test_df2.columns: if "tta" in col or 'preds' in col: count+=1 test_avg['target'] += test_df2[col]/len(Config.train_folds) test_avg['target'] /= count test_avg.to_csv(Config.model_output_folder + "/test_avg.csv", index=False) #just used vflip here # + # Create Submission File # + test_avg[['id', 'target']].to_csv("./submission.csv", index=False) test_avg[['id', 'target']].to_csv(Config.model_output_folder + "/submission.csv", index=False) # !mkdir -p ~/.kaggle/ && cp $Config.kaggle_json_path ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json # + # # !kaggle competitions submit -c g2net-gravitational-wave-detection -f ./submission.csv -m $Config.model_version # + # test_avg # -
1D_Model/notebooks/Richard_Models/TCNN_jarvis_35th.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["remove_cell"] # # Simon's Algorithm # - # In this section, we first introduce the Simon problem, and classical and quantum algorithms to solve it. We then implement the quantum algorithm using Qiskit, and run on a simulator and device. # # # ## Contents # # 1. [Introduction](#introduction) # 1.1 [Simon's Problem](#problem) # 1.2 [Simon's Algorithm](#algorithm) # 2. [Example](#example) # 3. [Qiskit Implementation](#implementation) # 3.1 [Simulation](#simulation) # 3.2 [Device](#device) # 4. [Oracle](#oracle) # 5. [Problems](#problems) # 6. [References](#references) # ## 1. Introduction <a id='introduction'></a> # # Simon's algorithm, first introduced in Reference [1], was the first quantum algorithm to show an exponential speed-up versus the best classical algorithm in solving a specific problem. This inspired the quantum algorithms based on the quantum Fourier transform, which is used in the most famous quantum algorithm: Shor's factoring algorithm. # # ### 1a. Simon's Problem <a id='problem'> </a> # # We are given an unknown blackbox function $f$, which is guaranteed to be either one-to-one ($1:1$) or two-to-one ($2:1$), where one-to-one and two-to-one functions have the following properties: # # - **one-to-one**: maps exactly one unique output for every input. An example with a function that takes 4 inputs is: # # $$f(1) \rightarrow 1, \quad f(2) \rightarrow 2, \quad f(3) \rightarrow 3, \quad f(4) \rightarrow 4$$ # # - **two-to-one**: maps exactly two inputs to every unique output. 
An example with a function that takes 4 inputs is: # # $$f(1) \rightarrow 1, \quad f(2) \rightarrow 2, \quad f(3) \rightarrow 1, \quad f(4) \rightarrow 2$$ # # This two-to-one mapping is according to a hidden bitstring, $b$, where: # # $$ # \textrm{given }x_1,x_2: \quad f(x_1) = f(x_2) \\ # \textrm{it is guaranteed }: \quad x_1 \oplus x_2 = b # $$ # # Given this blackbox $f$, how quickly can we determine if $f$ is one-to-one or two-to-one? Then, if $f$ turns out to be two-to-one, how quickly can we determine $b$? As it turns out, both cases boil down to the same problem of finding $b$, where a bitstring of $b={000...}$ represents the one-to-one $f$. # ### 1b. Simon's Algorithm <a id='algorithm'> </a> # # #### Classical Solution # # Classically, if we want to know what $b$ is with 100% certainty for a given $f$, we have to check up to $2^{n−1}+1$ inputs, where n is the number of bits in the input. This means checking just over half of all the possible inputs until we find two cases of the same output. Much like the Deutsch-Jozsa problem, if we get lucky, we could solve the problem with our first two tries. But if we happen to get an $f$ that is one-to-one, or get _really_ unlucky with an $f$ that’s two-to-one, then we’re stuck with the full $2^{n−1}+1$. # There are known algorithms that have a lower bound of $\Omega(2^{n/2})$ (see Reference 2 below), but generally speaking the complexity grows exponentially with n. # #### Quantum Solution # # The quantum circuit that implements Simon's algorithm is shown below. 
# # ![image1](images/simon_steps.jpeg) # # Where the query function, $\text{Q}_f$ acts on two quantum registers as: # # # $$ \lvert x \rangle \lvert a \rangle \rightarrow \lvert x \rangle \lvert a \oplus f(x) \rangle $$ # # In the specific case that the second register is in the state $|0\rangle = |00\dots0\rangle$ we have: # # $$ \lvert x \rangle \lvert 0 \rangle \rightarrow \lvert x \rangle \lvert f(x) \rangle $$ # # The algorithm involves the following steps. # <ol> # <li> Two $n$-qubit input registers are initialized to the zero state: # # # $$\lvert \psi_1 \rangle = \lvert 0 \rangle^{\otimes n} \lvert 0 \rangle^{\otimes n} $$ # # </li> # # <li> Apply a Hadamard transform to the first register: # # # $$\lvert \psi_2 \rangle = \frac{1}{\sqrt{2^n}} \sum_{x \in \{0,1\}^{n} } \lvert x \rangle\lvert 0 \rangle^{\otimes n} $$ # # # </li> # # <li> Apply the query function $\text{Q}_f$: # # # $$ \lvert \psi_3 \rangle = \frac{1}{\sqrt{2^n}} \sum_{x \in \{0,1\}^{n} } \lvert x \rangle \lvert f(x) \rangle $$ # # # </li> # # <li> Measure the second register. A certain value of $f(x)$ will be observed. Because of the setting of the problem, the observed value $f(x)$ could correspond to two possible inputs: $x$ and $y = x \oplus b $. Therefore the first register becomes: # # # $$\lvert \psi_4 \rangle = \frac{1}{\sqrt{2}} \left( \lvert x \rangle + \lvert y \rangle \right)$$ # # # where we omitted the second register since it has been measured. 
# </li> # # <li> Apply Hadamard on the first register: # # # $$ \lvert \psi_5 \rangle = \frac{1}{\sqrt{2^{n+1}}} \sum_{z \in \{0,1\}^{n} } \left[ (-1)^{x \cdot z} + (-1)^{y \cdot z} \right] \lvert z \rangle $$ # # # </li> # # <li> Measuring the first register will give an output only if: # # # $$ (-1)^{x \cdot z} = (-1)^{y \cdot z} $$ # # # which means: # $$ x \cdot z = y \cdot z \\ # x \cdot z = \left( x \oplus b \right) \cdot z \\ # x \cdot z = x \cdot z \oplus b \cdot z \\ # b \cdot z = 0 \text{ (mod 2)} $$ # # A string $z$ will be measured, whose inner product with $b = 0$. Thus, repeating the algorithm $\approx n$ times, we will be able to obtain $n$ different values of $z$ and the following system of equation can be written: # # # # $$ \begin{cases} b \cdot z_1 = 0 \\ b \cdot z_2 = 0 \\ \quad \vdots \\ b \cdot z_n = 0 \end{cases}$$ # # # # From which $b$ can be determined, for example by Gaussian elimination. # </li> # </ol> # # So, in this particular problem the quantum algorithm performs exponentially fewer steps than the classical one. Once again, it might be difficult to envision an application of this algorithm (although it inspired the most famous algorithm created by Shor) but it represents the first proof that there can be an exponential speed-up in solving a specific problem by using a quantum computer rather than a classical one. # ## 2. Example <a id='example'></a> # # Let's see the example of Simon's algorithm for 2 qubits with the secret string $b=11$, so that $f(x) = f(y)$ if $y = x \oplus b$. 
The quantum circuit to solve the problem is: # # ![image2](images/simon_example.jpeg) # # <ol> # <li> Two $2$-qubit input registers are initialized to the zero state: # # # $$\lvert \psi_1 \rangle = \lvert 0 0 \rangle_1 \lvert 0 0 \rangle_2 $$ # # </li> # # <li> Apply Hadamard gates to the qubits in the first register: # # # $$\lvert \psi_2 \rangle = \frac{1}{2} \left( \lvert 0 0 \rangle_1 + \lvert 0 1 \rangle_1 + \lvert 1 0 \rangle_1 + \lvert 1 1 \rangle_1 \right) \lvert 0 0 \rangle_2 $$ # # </li> # # <li> For the string $b=11$, the query function can be implemented as $\text{Q}_f = CX_{1_a 2_a}CX_{1_a 2_b}CX_{1_b 2_a}CX_{1_b 2_b}$ (as seen in the circuit diagram above): # # $$ # \begin{aligned} # \lvert \psi_3 \rangle = \frac{1}{2} ( \; # & \lvert 0 0 \rangle_1 \; \lvert 0\oplus 0 \oplus 0, & 0 \oplus 0 \oplus 0 \rangle_2 &\\[5pt] # + & \lvert 0 1 \rangle_1 \; \lvert 0\oplus 0 \oplus 1, & 0 \oplus 0 \oplus 1 \rangle_2 &\\[6pt] # + & \lvert 1 0 \rangle_1 \; \lvert 0\oplus 1 \oplus 0, & 0 \oplus 1 \oplus 0 \rangle_2 &\\[6pt] # + & \lvert 1 1 \rangle_1 \; \lvert 0\oplus 1 \oplus 1, & 0 \oplus 1 \oplus 1 \rangle_2 & \; )\\ # \end{aligned} # $$ # # Thus: # # $$ # \begin{aligned} # \lvert \psi_3 \rangle = \frac{1}{2} ( \quad # & \lvert 0 0 \rangle_1 \lvert 0 0 \rangle_2 & \\[6pt] # + & \lvert 0 1 \rangle_1 \lvert 1 1 \rangle_2 & \\[6pt] # + & \lvert 1 0 \rangle_1 \lvert 1 1 \rangle_2 & \\[6pt] # + & \lvert 1 1 \rangle_1 \lvert 0 0 \rangle_2 & \; )\\ # \end{aligned} # $$ # </li> # # <li> We measure the second register. With $50\%$ probability we will see either $\lvert 0 0 \rangle_2$ or $\lvert 1 1 \rangle_2$. For the sake of the example, let us assume that we see $\lvert 1 1 \rangle_2$. The state of the system is then # # # $$ \lvert \psi_4 \rangle = \frac{1}{\sqrt{2}} \left( \lvert 0 1 \rangle_1 + \lvert 1 0 \rangle_1 \right) $$ # # # # where we omitted the second register since it has been measured. 
# # </li> # # # # <li> Apply Hadamard on the first register # $$ \lvert \psi_5 \rangle = \frac{1}{2\sqrt{2}} \left[ \left( \lvert 0 \rangle + \lvert 1 \rangle \right) \otimes \left( \lvert 0 \rangle - \lvert 1 \rangle \right) + \left( \lvert 0 \rangle - \lvert 1 \rangle \right) \otimes \left( \lvert 0 \rangle + \lvert 1 \rangle \right) \right] \\ # = \frac{1}{2\sqrt{2}} \left[ \lvert 0 0 \rangle - \lvert 0 1 \rangle + \lvert 1 0 \rangle - \lvert 1 1 \rangle + \lvert 0 0 \rangle + \lvert 0 1 \rangle - \lvert 1 0 \rangle - \lvert 1 1 \rangle \right] \\ # = \frac{1}{\sqrt{2}} \left( \lvert 0 0 \rangle - \lvert 1 1 \rangle \right)$$ # # </li> # # <li> Measuring the first register will give either $\lvert 0 0 \rangle$ or $\lvert 1 1 \rangle$ with equal probability. # </li> # <li> # If we see $\lvert 1 1 \rangle$, then: # # # $$ b \cdot 11 = 0 $$ # # which tells us that $b \neq 01$ or $10$, and the two remaining potential solutions are $b = 00$ or $b = 11$. Note that $b = 00$ will always be a trivial solution to our simultaneous equations. If we repeat steps 1-6 many times, we would only measure $|00\rangle$ or $|11\rangle$ as # # $$ b \cdot 11 = 0 $$ # $$ b \cdot 00 = 0 $$ # # are the only equations that satisfy $b=11$. We can verify $b=11$ by picking a random input ($x_i$) and checking $f(x_i) = f(x_i \oplus b)$. For example: # # $$ 01 \oplus b = 10 $$ # $$ f(01) = f(10) = 11$$ # # </li> # </ol> # ## 3. Qiskit Implementation <a id='implementation'></a> # # We now implement Simon's algorithm for an example with $3$-qubits and $b=110$. # + tags=["thebelab-init"] # importing Qiskit from qiskit import IBMQ, BasicAer from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, execute # import basic plot tools from qiskit.visualization import plot_histogram from qiskit_textbook.tools import simon_oracle # - # The function `simon_oracle` (imported above) creates a Simon oracle for the bitstring `b`. 
# This is given without explanation, but we will discuss the method in [section 4](#oracle).
#
# In Qiskit, measurements are only allowed at the end of the quantum circuit. In the case of Simon's algorithm, we actually do not care about the output of the second register, and will only measure the first register.

# +
b = '110'

n = len(b)

# Two n-qubit registers: qubits [0, n) form the input register, [n, 2n) the
# oracle-output register; only n classical bits are needed since we measure
# the input register alone.
simon_circuit = QuantumCircuit(n*2, n)

# Apply Hadamard gates before querying the oracle
simon_circuit.h(range(n))

# Apply barrier for visual separation
simon_circuit.barrier()

# NOTE(review): in-place circuit addition (+=) is deprecated in newer Qiskit
# releases in favour of QuantumCircuit.compose(); kept as-is for the Qiskit
# version this notebook targets (it also still uses BasicAer/execute).
simon_circuit += simon_oracle(b)

# Apply barrier for visual separation
simon_circuit.barrier()

# Apply Hadamard gates to the input register
simon_circuit.h(range(n))

# Measure qubits (input register only -- the oracle output register is ignored)
simon_circuit.measure(range(n), range(n))
simon_circuit.draw()
# -

# ### 3a. Experiment with Simulators <a id='simulation'></a>
#
# We can run the above circuit on the simulator.

# use local simulator
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(simon_circuit, backend=backend, shots=shots).result()
counts = results.get_counts()
plot_histogram(counts)

# Since we know $b$ already, we can verify these results do satisfy $b\cdot z = 0 \pmod{2}$:

# +
# Calculate the dot product of the results
def bdotz(b, z):
    """Return the bitwise inner product of bitstrings b and z, modulo 2."""
    accum = 0
    for i in range(len(b)):
        accum += int(b[i]) * int(z[i])
    return (accum % 2)

# Every measured z should satisfy b.z = 0 (mod 2) on a noiseless simulator.
for z in counts:
    print( '{}.{} = {} (mod 2)'.format(b, z, bdotz(b,z)) )
# -

# Using these results, we can recover the value of $b = 110$ by solving this set of simultaneous equations.
# For example, say we first measured `001`, this tells us:
#
# $$
# \require{cancel}
# \begin{aligned}
# b \cdot 001 &= 0 \\
# (b_2 \cdot 0) + (b_1 \cdot 0) + (b_0 \cdot 1) & = 0 \\
# (\cancel{b_2 \cdot 0}) + (\cancel{b_1 \cdot 0}) + (b_0 \cdot 1) & = 0 \\
# b_0 & = 0\\
# \end{aligned}
# $$
#
# If we next measured `111`, we have:
#
# $$
# \require{cancel}
# \begin{aligned}
# b \cdot 111 &= 0 \\
# (b_2 \cdot 1) + (b_1 \cdot 1) + (\cancel{0 \cdot 1}) & = 0 \\
# (b_2 \cdot 1) + (b_1 \cdot 1) & = 0 \\
# \end{aligned}
# $$
#
# Which tells us either:
#
# $$ b_2 = b_1 = 0, \quad b = 000 $$
#
# or
#
# $$ b_2 = b_1 = 1, \quad b = 110 $$
#
# Of which $b = 110$ is the non-trivial solution to our simultaneous equations. We can solve these problems in general using [Gaussian elimination](https://mathworld.wolfram.com/GaussianElimination.html), which has a run time of $O(n^3)$.

# ### 3b. Experiment with Real Devices <a id='device'></a>
#
# The circuit in [section 3a](#simulation) uses $2n = 6$ qubits, while at the time of writing many IBM Quantum devices only have 5 qubits. We will run the same code, but instead using $b=11$ as in the example in section 2, requiring only 4 qubits.

# +
b = '11'
n = len(b)
simon_circuit_2 = QuantumCircuit(n*2, n)

# Apply Hadamard gates before querying the oracle
simon_circuit_2.h(range(n))

# Query oracle
simon_circuit_2 += simon_oracle(b)

# Apply Hadamard gates to the input register
simon_circuit_2.h(range(n))

# Measure qubits
simon_circuit_2.measure(range(n), range(n))
simon_circuit_2.draw()
# -

# This circuit is slightly different to the circuit shown in [section 2](#example). The outputs are different, but the input collisions are the same, i.e. both have the property that $f(x) = f(x \oplus 11)$.
# + tags=["uses-hardware"] # Load our saved IBMQ accounts and get the least busy backend device with less than or equal to 5 qubits IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= n and not x.configuration().simulator and x.status().operational==True)) print("least busy backend: ", backend) # Execute and monitor the job from qiskit.tools.monitor import job_monitor shots = 1024 job = execute(simon_circuit_2, backend=backend, shots=shots, optimization_level=3) job_monitor(job, interval = 2) # Get results and plot counts device_counts = job.result().get_counts() plot_histogram(device_counts) # + tags=["uses-hardware"] # Calculate the dot product of the results def bdotz(b, z): accum = 0 for i in range(len(b)): accum += int(b[i]) * int(z[i]) return (accum % 2) print('b = ' + b) for z in device_counts: print( '{}.{} = {} (mod 2) ({:.1f}%)'.format(b, z, bdotz(b,z), device_counts[z]*100/shots)) # - # As we can see, the most significant results are those for which $b\cdot z = 0$ (mod 2). The other results are erroneous, but have a lower probability of occurring. Assuming we are unlikely to measure the erroneous results, we can then use a classical computer to recover the value of $b$ by solving the linear system of equations. For this $n=2$ case, $b = 11$. # ## 4. Oracle <a id='oracle'></a> # # The above [example](#example) and [implementation](#implementation) of Simon's algorithm are specifically for specific values of $b$. To extend the problem to other secret bit strings, we need to discuss the Simon query function or oracle in more detail. # # The Simon algorithm deals with finding a hidden bitstring $b \in \{0,1\}^n$ from an oracle $f_b$ that satisfies $f_b(x) = f_b(y)$ if and only if $y = x \oplus b$ for all $x \in \{0,1\}^n$. Here, the $\oplus$ is the bitwise XOR operation. 
Thus, if $b = 0\ldots 0$, i.e., the all-zero bitstring, then $f_b$ is a 1-to-1 (or, permutation) function. Otherwise, if $b \neq 0\ldots 0$, then $f_b$ is a 2-to-1 function. # # In the algorithm, the oracle receives $|x\rangle|0\rangle$ as input. With regards to a predetermined $b$, the oracle writes its output to the second register so that it transforms the input to $|x\rangle|f_b(x)\rangle$ such that $f(x) = f(x\oplus b)$ for all $x \in \{0,1\}^n$. # # Such a blackbox function can be realized by the following procedures. # # - Copy the content of the first register to the second register. # $$ # |x\rangle|0\rangle \rightarrow |x\rangle|x\rangle # $$ # # - **(Creating 1-to-1 or 2-to-1 mapping)** If $b$ is not all-zero, then there is the least index $j$ so that $b_j = 1$. If $x_j = 0$, then XOR the second register with $b$. Otherwise, do not change the second register. # $$ # |x\rangle|x\rangle \rightarrow |x\rangle|x \oplus b\rangle~\mbox{if}~x_j = 0~\mbox{for the least index j} # $$ # # - **(Creating random permutation)** Randomly permute and flip the qubits of the second register. # $$ # |x\rangle|y\rangle \rightarrow |x\rangle|f_b(y)\rangle # $$ # # ## 5. Problems <a id='problems'></a> # # 1. Implement a general Simon oracle using Qiskit. # 2. Test your general Simon oracle with the secret bitstring $b=1001$, on a simulator and device. Are the results what you expect? Explain why. # ## 6. References <a id='references'></a> # # 1. <NAME> (1997) "On the Power of Quantum Computation" SIAM Journal on Computing, 26(5), 1474–1483, [doi:10.1137/S0097539796298637](https://doi.org/10.1137/S0097539796298637) # 2. <NAME> and <NAME>. Optimal separation in exact query complexities for Simon's problem. Journal of Computer and System Sciences 97: 83-93, 2018, [https://doi.org/10.1016/j.jcss.2018.05.001](https://doi.org/10.1016/j.jcss.2018.05.001) import qiskit qiskit.__qiskit_version__
content/ch-algorithms/simon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/adityacd/Machine-Leaning/blob/master/Practice.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="ZA97RlPdy6lH" colab_type="code" colab={} import numpy as np import pandas as pd import matplotlib.pyplot as plt # + id="uUEnKwQyXVIj" colab_type="code" colab={} dataset = pd.read_csv('Position_Salaries.csv') X = dataset.iloc[:, 1:-1].values y = dataset.iloc[:, -1].values # + id="9417RQHpLdo1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="fc9c5e5a-0e4a-4bda-c9c7-9e9f1fc1f639" from sklearn.ensemble import RandomForestRegressor regressor = RandomForestRegressor(n_estimators = 10, random_state = 0) regressor.fit(X, y) # + id="4AF3Wbl5MmQ0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="de406a6a-1ecc-48fc-da14-b61d0181d84b" regressor.predict([[6.5]])
Practice.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gfgullo/AVSpeechSynthesizer-Example/blob/master/8%20-%20Chatbot/chatbot_seq2seq.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="4JI1tKyAjT66" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 226} outputId="ab238a29-e4b0-4598-dd8e-9feb1cf47c0c"
# Download the Cornell Movie-Dialogs corpus (training data for the seq2seq chatbot).
# !wget http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip

# + id="gTbFKg-Ej3-E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="c66ea6aa-8af6-448d-e497-7a8a4902502c"
# Extract the archive into the current working directory.
# !unzip cornell_movie_dialogs_corpus.zip

# + id="anYbp1HGkYh6" colab_type="code" colab={}
8 - Chatbot/chatbot_seq2seq.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # One-step Ignition Mechanism for n-heptane/air Combustion # # _This demo is part of Spitfire, with [licensing and copyright info here.](https://github.com/sandialabs/Spitfire/blob/master/license.md)_ # # # _Highlights_ # - Building a simple mechanism from Python with Cantera # - Comparing n-heptane ignition behavior for two reaction mechanisms # ## Introduction # # This demonstration shows how to build a reaction mechanism, here a simple one-step global combustion reaction for n-heptane, using Cantera. We then use Spitfire to simulate homogeneous ignition of an h-heptane/air mixture, with both the one-step reaction mechanism and a kinetics model from Liu et al. (*Effects on Strain Rate on High-Pressure Nonpremixed N-Heptane Autoignition in Counterflow*, Comb. Flame, 137, 320-339, 2004.). from spitfire import ChemicalMechanismSpec, HomogeneousReactor import cantera as ct import matplotlib.pyplot as plt # ## Building the Mechanism # # # ### Specifying the Reaction # Cantera provides several formats for chemical reaction mechanism - see [here](https://cantera.org/tutorials/input-files.html) for the details. We use the CTI format below to create the following reaction and its non-elementary rate expression. A standard Arrhenius rate constant is employed. # # $$ # 2\mathrm{C}_7\mathrm{H}_{16} \, + 22\mathrm{O}_2 \, \rightarrow 14\mathrm{CO}_2 \, + 16\mathrm{H}_2\mathrm{O} # $$ # # $$ # \mathrm{rate} = 2\cdot10^{7}\exp\left(-\frac{30 \mathrm{kcal}/\mathrm{mol}}{RT}\right)\langle\mathrm{C}_7\mathrm{H}_{16}\rangle^{0.25}\langle\mathrm{O}_2\rangle^{1.5} # $$ # # An advantage to building this mechanism in Python is that we could easily test a range of parameters such as the activation energy or pre-exponential factor. 
We could even build a reactor into an optimization loop to identify an optimal one-step reaction rate in some sense. reaction_cti = ''' reaction( '2 NXC7H16 + 22 O2 => 14 CO2 + 16 H2O', [2e7, 0, (30.0, 'kcal/mol')], order='NXC7H16:0.25 O2:1.5') ''' # ### Specifying Species Properties # # Now we need to specify thermodynamic properties - molecular weights and heat capacity polynomials over temperature. We obtain these from the Liu et al. mechanism. Below we build the `species_list` to contain the Cantera `Species` objects required for the one-step mechanism. # + species_in_model = ['NXC7H16', 'O2', 'H2O', 'CO2', 'N2'] liu_xml_file = 'heptane-liu-hewson-chen-pitsch-highT.xml' species_data = ct.Species.listFromFile(liu_xml_file) species_list = list() for sp in species_data: if sp.name in species_in_model: species_list.append(sp) # - # ### Combine for a Cantera `Solution` # # Now the final step - build a Cantera `Solution` that includes the listed species and the reaction(s) defined above. s = ct.Solution(thermo='IdealGas', kinetics='GasKinetics', species=species_list, reactions=[ct.Reaction.fromCti(reaction_cti)]) # ## Comparing Ignition Behavior # # Now we create Spitfire `ChemicalMechanismSpec` objects, which can be done with the `Solution` object as follows, or with the XML as we've done before (which we use below to build the Liu mechanism holder). # # Given the mechanisms, we can now build `HomogeneousReactor` instances filled with stoichiometric n-heptane/air mixtures, and integrate them for 100 ms, which will encompass an ignition event from a sparked mixture at 1000 K and atmospheric pressure. Major species and temperature are plotted over time in the following figures. 
# +
# Build mechanism holders: the one-step model directly from the in-memory Solution,
# and the Liu et al. detailed model from its XML file.
mech_1step = ChemicalMechanismSpec.from_solution(s)
# Bug fix: the original referenced an undefined name `xml_file_for_species`;
# the Liu mechanism XML path is held in `liu_xml_file`.
mech_liu = ChemicalMechanismSpec(cantera_xml=liu_xml_file, group_name='gas')

# Integrate an adiabatic, isobaric, closed reactor for 100 ms with each mechanism,
# starting from a stoichiometric n-heptane/air mixture at 1000 K and 1 atm.
solutions = dict()
for mech, name in [(mech_1step, '1 step'), (mech_liu, 'Liu')]:
    fuel = mech.stream('X', 'NXC7H16:1')
    air = mech.stream(stp_air=True)
    mix = mech.mix_for_equivalence_ratio(phi=1., fuel=fuel, oxy=air)
    mix.TP = 1000, 101325

    reactor = HomogeneousReactor(mech_spec=mech,
                                 initial_mixture=mix,
                                 configuration='isobaric',
                                 heat_transfer='adiabatic',
                                 mass_transfer='closed')
    solutions[name] = reactor.integrate_to_time(0.1)
# -

# Plot major species (left axis) and temperature (right axis) against time for each mechanism.
for name in solutions:
    solution = solutions[name]
    t = solution.time_values * 1.e3  # seconds -> milliseconds

    fig, axY = plt.subplots()
    axY.plot(t, solution['mass fraction NXC7H16'], label='$\\mathrm{C}_7\\mathrm{H}_{16}$')
    axY.plot(t, solution['mass fraction O2'], label='$\\mathrm{O}_2$')
    axY.plot(t, solution['mass fraction CO2'], label='$\\mathrm{CO}_2$')
    axY.plot(t, solution['mass fraction H2O'], label='$\\mathrm{H}_2\\mathrm{O}$')
    axY.legend(loc='center left')
    axY.set_ylabel('mass fraction')
    axY.set_xlim([0, 100])
    axY.set_ylim([0, 0.25])
    axY.set_xlabel('t (ms)')

    axT = axY.twinx()
    axT.plot(t, solution['temperature'], 'k--', label='temperature')
    axT.set_ylabel('T (K)')
    axT.set_ylim([0, 3100])
    axT.legend(loc='center right')

    plt.title(name)
    fig.tight_layout()
    plt.show()

# The plots above suggest several key differences between the one-step and more detailed reaction mechanisms.
#
# - The one-step mechanism predicts that the ignited mixture is hotter (~400 K) and contains more CO2.
#   The simplified chemistry represents idealized combustion, with all hydrogen going to H2O and all carbon
#   going to CO2.
# - The more detailed mechanism predicts breakdown of the n-heptane in the fuel during the induction phase.
#
# The following plot shows the Liu et al.
# results, this time including the ethylene (C2H4) and carbon monoxide (CO) mass fractions to show the
# breakdown of n-heptane into smaller hydrocarbons and to make up the difference in CO2 prediction from
# the one-step model.

# +
solution = solutions['Liu']
t = solution.time_values * 1.e3  # seconds -> milliseconds

fig, axY = plt.subplots()
axY.plot(t, solution['mass fraction NXC7H16'], label='$\\mathrm{C}_7\\mathrm{H}_{16}$')
axY.plot(t, solution['mass fraction O2'], label='$\\mathrm{O}_2$')
axY.plot(t, solution['mass fraction CO2'], label='$\\mathrm{CO}_2$')
axY.plot(t, solution['mass fraction H2O'], label='$\\mathrm{H}_2\\mathrm{O}$')
# intermediates unique to the detailed mechanism
axY.plot(t, solution['mass fraction C2H4'], '-.', label='$\\mathrm{C}_2\\mathrm{H}_4$')
axY.plot(t, solution['mass fraction CO'], '-.', label='$\\mathrm{CO}$')
axY.legend(loc='upper left')
axY.set_ylabel('mass fraction')
axY.set_xlim([0, 100])
axY.set_xlabel('t (ms)')

axT = axY.twinx()
axT.plot(t, solution['temperature'], 'k-', label='temperature')
axT.set_ylabel('T (K)')
axT.legend(loc='center right')

# Bug fix: the original used plt.title(name), which silently relied on the leftover
# loop variable from the previous cell; this plot is always the Liu et al. result.
plt.title('Liu')
fig.tight_layout()
plt.show()
# -

# ## Conclusions
#
# This notebook has briefly showcased the use of Cantera to build reaction mechanisms from Python, and the
# use of Spitfire to compare ignition behavior of two n-heptane combustion mechanisms.
docs/source/demo/reactors/one_step_heptane_ignition.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evolutionary Game Theory
#
# In the previous chapter, we considered the case of fitness being independent of the distribution of the
# whole population (the rates of increase of 1 type just depended on the quantity of that type). That was a
# specific case of Evolutionary game theory which considers **frequency dependent selection**.
#
#
# ---
#
# ## Frequency dependent selection
#
# [Video](https://youtu.be/PFtwwrcouXY?list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
#
# Consider a population of two types. Let $x=(x_1, x_2)$ correspond to the population sizes of both types.
# The fitness functions are given by:
#
# $$f_1(x)\qquad f_2(x)$$
#
# As before we ensure a constant population size: $x_1 + x_2 = 1$. We have:
#
# $$
# \frac{dx_1}{dt}=x_1(f_1(x)-\phi) \qquad \frac{dx_2}{dt}=x_2(f_2(x)-\phi)
# $$
#
# we again have:
#
# $$
# \frac{dx_1}{dt} + \frac{dx_2}{dt}=x_1(f_1(x)-\phi) + x_2(f_2(x)-\phi)=0
# $$
#
# So $\phi=x_1f_1(x)+x_2f_2(x)$ (the average fitness).
#
# We can substitute: $x_2=1-x_1$ to obtain:
#
# $$
# \frac{dx_1}{dt}=x_1(f_1(x)-x_1f_1(x)-x_2f_2(x))=x_1((1-x_1)f_1(x)-(1-x_1)f_2(x))
# $$
#
# $$
# \frac{dx_1}{dt}=x_1(1-x_1)(f_1(x)-f_2(x))
# $$
#
# We see that we have 3 equilibria:
#
# - $x_1=0$
# - $x_1=1$
# - Whatever distribution of $x$ that ensures: $f_1(x)=f_2(x)$
#
#
# ---
#
# ## Evolutionary Game Theory
#
# Now we will consider potential differences of these equilibria. First we will return to considering
# Normal form games:
#
# $$
# A =
# \begin{pmatrix}
#     a & b\\
#     c & d
# \end{pmatrix}
# $$
#
# Evolutionary Game theory assigns strategies as types in a population, and individuals randomly encounter
# other individuals and play their corresponding strategy.
# The matrix $A$ corresponds to the utility of a row player in a game where the row player is a given
# individual and the column player is the population.
#
# This gives:
#
# $$f_1=ax_1+bx_2\qquad f_2=cx_1+dx_2$$
#
# or equivalently:
#
# $$f=Ax\qquad \phi=fx$$
#
# thus we have the same equation as before but in matrix notation:
#
# $$\frac{dx}{dt}=x(f-\phi)$$
#
# ---
#
# In this case, the 3 stable distributions correspond to:
#
# - An entire population playing the first strategy;
# - An entire population playing the second strategy;
# - A population playing a mixture of first and second (such that there is indifference between the fitness).
#
# ---
#
# We now consider the utility of a stable population in a **mutated** population.
#
#
# ---
#
# ## Mutated population
#
# Given a strategy vector $x=(x_1, x_2)$, some $\epsilon>0$ and another strategy $y=(y_1, y_2)$, the post
# entry population $x_{\epsilon}$ is given by:
#
# $$
# x_{\epsilon} = (x_1 + \epsilon(y_1 - x_1), x_2 + \epsilon(y_2 - x_2))
# $$
#
#
#
# ---
#
# ## Evolutionary Stable Strategies
#
# [Video](https://youtu.be/lbzcToUM9ic?list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
#
# Given a stable population distribution $x$, it represents an **Evolutionary Stable Strategy** (ESS) if
# and only if there exists $\bar\epsilon>0$:
#
# $$u(x, x_{\epsilon})>u(y, x_{\epsilon})\text{ for all }0<\epsilon<\bar\epsilon, y$$
#
#
# where $u(x, y)$ corresponds to the fitness of strategy $x$ in population $y$ which is given by:
#
# $$xAy^T$$
#
# ---
#
# For the first type to be an ESS this corresponds to:
#
# $$a(1-\epsilon)+b\epsilon > c(1-\epsilon) + d\epsilon$$
#
# For small values of $\epsilon$ this corresponds to:
#
# $$a>c$$
#
# However if $a=c$, this corresponds to:
#
# $$b>d$$
#
# Thus the first strategy is an ESS (i.e. resists invasion) iff one of the following holds:
#
# 1. $a > c$
# 2.
# $a=c$ and $b > d$

import numpy as np
import nashpy as nash
import matplotlib.pyplot as plt

# The case of $a>c$:

# + tags=["nbval-ignore-output"]
A = np.array([[4, 3], [2, 1]])
game = nash.Game(A)

# Common settings for every replicator-dynamics run below: a fine time grid
# and a small invasion by the second strategy.
timepoints = np.linspace(0, 10, 1000)
epsilon = 0.1

trajectories = game.replicator_dynamics(
    y0=[1 - epsilon, epsilon],
    timepoints=timepoints,
)
plt.plot(trajectories);
# -

# The case of $a=c$ and $b>d$:

# + tags=["nbval-ignore-output"]
A = np.array([[4, 3], [4, 1]])
game = nash.Game(A)
trajectories = game.replicator_dynamics(
    y0=[1 - epsilon, epsilon],
    timepoints=timepoints,
)
plt.plot(trajectories);
# -

# $a=c$ and $b < d$:

# + tags=["nbval-ignore-output"]
A = np.array([[4, 3], [4, 5]])
game = nash.Game(A)
trajectories = game.replicator_dynamics(
    y0=[1 - epsilon, epsilon],
    timepoints=timepoints,
)
plt.plot(trajectories);
# -

# $a < c$:

# + tags=["nbval-ignore-output"]
A = np.array([[1, 3], [4, 1]])
game = nash.Game(A)
trajectories = game.replicator_dynamics(
    y0=[1 - epsilon, epsilon],
    timepoints=timepoints,
)
plt.plot(trajectories);
# -

# We see in the above case that the population seems to stabilise at a mixed strategy. This leads to the
# general definition of the fitness of a mixed strategy: $x=(x_1, x_2)$:
#
# $$u(x,x) = x_1f_1(x)+x_2f_2(x)$$
#
# ---
#
# ## General condition for ESS
#
# [Video](https://youtu.be/zkhInay5xQc?list=PLnC5h3PY-znxMsG0TRYGOyrnEO-QhVwLb)
#
# If $x$ is an ESS, then for all $y\ne x$, either:
#
# 1. $u(x,x)>u(y,x)$
# 2. $u(x,x)=u(y,x)$ and $u(x,y)>u(y,y)$
#
# ---
#
# Conversely, if either (1) or (2) holds for all $y\ne x$ then $x$ is an ESS.
#
# ---
#
# ### Proof
#
# ---
#
# If $x$ is an ESS, then by definition:
#
# $$u(x,x_{\epsilon})>u(y,x_{\epsilon})$$
#
# which corresponds to:
#
# $$(1-\epsilon)u(x,x)+\epsilon u(x,y)>(1-\epsilon)u(y,x)+\epsilon u(y,y)$$
#
# - If condition 1 of the theorem holds then the above inequality can be satisfied for \\(\epsilon\\)
#   sufficiently small. If condition 2 holds then the inequality is satisfied.
# - Conversely:
#
#   - If $u(x,x) < u(y,x)$ then we can find $\epsilon$ sufficiently small such that the inequality is violated.
#
#   - If $u(x, x) = u(y,x)$ and $u(x,y) \leq u(y,y)$ then the inequality is violated.
#
# This result gives us an efficient way of computing ESS. The first condition is in fact almost a condition
# for Nash Equilibrium (with a strict inequality), the second is thus a stronger condition that removes
# certain Nash equilibria from consideration. This becomes particularly relevant when considering Nash
# equilibrium in mixed strategies.
#
# To find ESS in a pairwise context population game we:
#
# 1. Write down the associated two-player game $(A, A^T)\in{\mathbb{R}^{m\times n}}^2$;
# 2. Identify all symmetric Nash equilibria of the game;
# 3. Test the Nash equilibrium against the two conditions of the above Theorem.
#
# Let us apply it to the one example that seemed to stabilise at a mixed strategy:
#
# $$
# A =\begin{pmatrix}
#     1 & 3\\
#     4 & 1
# \end{pmatrix}
# $$

import nashpy as nash

# Enumerate the Nash equilibria of the symmetric two-player game (A, A^T).
game = nash.Game(A, A.transpose())
list(game.support_enumeration())

# Looking at $x=(.4, .6)$ (which is the only symmetric nash equilibrium), we have
#
# $$u(x, x)=u(y, x)$$
#
# and (recall $y_1 + y_2 = 1$):
#
# $$
# u(x, y)=2.8y_1 + 1.8y_2=2.8y_1 + 1.8(1-y_1)=y_1+1.8
# $$
#
# \begin{align}
# u(y, y)&=y_1^2+3y_1y_2+4y_1y_2+y_2^2\\
#        &=y_1^2+7y_1-7y_1^2+1 - 2y_1 + y_1^2\\
#        &=5y_1-5y_1^2+1
# \end{align}
#
# Thus:
#
# $$u(x, y) - u(y, y) = -4y_1+5y_1^2+.8 = 5(y_1 - .4)^2$$
#
# however $y_1\ne.4$ thus $x=(.4, .6)$ is an ESS.
#
# Here is some code to verify the above calculations:

import sympy as sym

sym.init_printing()

# Symbolic setup: the payoff matrix and a general mixed strategy y = (y_1, y_2).
A = sym.Matrix(A)
y_1, y_2 = sym.symbols("y_1, y_2")
y = sym.Matrix([y_1, y_2])
A, y

# u(y, y) with y_2 eliminated via y_2 = 1 - y_1.
rhs = sym.expand((y.transpose() * A * y)[0].subs({y_2: 1 - y_1}))
rhs

# u(x, y) for the candidate ESS x = (.4, .6).
lhs = sym.expand((sym.Matrix([[.4, .6]]) * A * y)[0].subs({y_2: 1 - y_1}))
lhs

# The difference factors as 5*(y_1 - 0.4)**2 > 0 for y_1 != 0.4, confirming the ESS.
sym.factor(lhs - rhs)
nbs/chapters/08-Evolutionary-Game-Theory.ipynb