code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # D:\Toppan\jupyter\per machine import pandas as pd import os import numpy as np from datetime import timedelta import time import glob from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline # + in_dir = 'D:\\Toppan\\2017-11-20 全データ\\処理済(総量)\\vectorized_keikaku_shibata' files = [f for f in os.listdir(in_dir)] voc_dir_base = 'D:\\Toppan\\2017-11-20 全データ\\データ\\' base_out_dir = 'D:\\Toppan\\2017-11-20 全データ\\解析結果(総量)\\年間モデル\\追加学習なし\\ExtraTrees_30_days' # train and test data energies = ['蒸気', '冷水', '電力'] month = [ '16年11月', '16年12月', '17年1月', '17年2月', '17年3月', '17年4月', '17年5月', '17年6月', '17年7月', '17年8月', '17年9月', '17年10月', '17年11月', '17年12月', '18年1月', '18年2月'] month_file = [ '201611010800.xlsx', '201612010800.xlsx', '201701010800.xlsx', '201702010800.xlsx', '201703010800.xlsx', '201704010800.xlsx', '201705010800.xlsx', '201706010800.xlsx', '201707010800.xlsx', '201708010800.xlsx', '201709010800.xlsx', '201710010800.xlsx', '201711010800.xlsx', '201712010800.xlsx', '201801010800.xlsx', '201802010800.xlsx', ] # - def read_df(file, month, sheet_name): # data df = pd.read_excel(os.path.join(in_dir, file), sheet_name=sheet_name) # April special case if month == '17年4月': df = df.iloc[:358] # voc ''' voc_dir = os.path.join(voc_dir_base, month, 'VOC再利用追加') voc_f = glob.glob(voc_dir + '/*.csv')[0] df_voc = pd.read_csv(voc_f, index_col=0, encoding='shift-jis', engine='python', parse_dates=True).fillna(0) ''' df.columns = [ '総稼動時間-3', '計画投入数(R)-3', '外気温度-3', '外気湿度-3', '計画色数-3', '段取-3', '計画停止-3', '計画開始完了数-3', sheet_name + '-3', '総稼動時間-2', '計画投入数(R)-2', '外気温度-2', '外気湿度-2', '計画色数-2', '段取-2', '計画停止-2', '計画開始完了数-2', sheet_name + '-2', '総稼動時間-1', '計画投入数(R)-1', '外気温度-1', 
'外気湿度-1', '計画色数-1', '段取-1', '計画停止-1', '計画開始完了数-1', sheet_name + '-1', 'target'] # add voc to data ''' df['VOC燃料生成量-3'] = pd.Series(data=df_voc['VOC燃料生成量'].iloc[:-2].values, index=df.index) df['VOC燃料生成量-2'] = pd.Series(data=df_voc['VOC燃料生成量'].iloc[1:-1].values, index=df.index) df['VOC燃料生成量-1'] = pd.Series(data=df_voc['VOC燃料生成量'].iloc[2:].values, index=df.index) df['VOC再利用生成量-3'] = pd.Series(data=df_voc['VOC再利用生成量'].iloc[:-2].values, index=df.index) df['VOC再利用生成量-2'] = pd.Series(data=df_voc['VOC再利用生成量'].iloc[1:-1].values, index=df.index) df['VOC再利用生成量-1'] = pd.Series(data=df_voc['VOC再利用生成量'].iloc[2:].values, index=df.index) ''' return df # + holiday_path = 'D:\\Toppan\\2017-11-20 全データ\\データ\\切り離し全休日\\全休日.xlsx' def mask_out(X, y, month): try: df_filter = pd.read_excel(holiday_path, sheet_name=month, index_col=0).iloc[2:] except Exception as e: print(e, month) return X, y seisan = True if '生産\n有無' in df_filter else False def isBusy(idx): row = df_filter.loc[idx] if row.loc['切離\n有無'] == '切離' or row.loc['全休\n判定'] == '全休' \ or row.loc['異常判定'] == '※異常稼動' or (seisan and row.loc['生産\n有無'] == '無'): return False else: return True x_busy_idx = [] y_busy_idx = [] for x_idx, y_idx in zip (X.index, y.index): if isBusy(x_idx) and isBusy(y_idx): x_busy_idx.append(x_idx) y_busy_idx.append(y_idx) return X.loc[x_busy_idx], y.loc[y_busy_idx] # - def get_importance_figure(model, name, features): indices = np.argsort(model.feature_importances_)[::-1] # save csv s = pd.Series(data=model.feature_importances_[indices], index=features[indices]) s.to_csv(os.path.join(out_dir, name + '_寄与度.csv'), encoding='shift-jis') # + def split_day_night(acc_abs): acc_abs_days, acc_abs_nights = [], [] for i, acc in acc_abs.iteritems(): if 7 < i.hour < 22: acc_abs_days.append(acc) else: acc_abs_nights.append(acc) return acc_abs_days, acc_abs_nights def get_output(res, output, sname, month): res = res[res['target'] != 0] if len(res) == 0: return None y_pred, y_true = res['preds'], res['target'] '''calculate 
abs accuracy''' acc_abs = abs(y_pred - y_true) / y_true '''aplit days and nights''' acc_abs_days, acc_abs_nights = split_day_night(acc_abs) len_days, len_nights = len(acc_abs_days), len(acc_abs_nights) sname2acc = {'蒸気': [0.2, 0.15], '電力': [0.09, 0.15], '冷水': [0.15, 0.1]} '''acc stats''' len_acc_days = len(list(filter(lambda x: x <= sname2acc[sname][0], acc_abs_days))) len_acc_nights = len(list(filter(lambda x: x <= sname2acc[sname][0], acc_abs_nights))) acc_stats_days = len_acc_days / len_days acc_stats_nights = len_acc_nights / len_nights output['設備名'].append(month + '_' + sname) output['平日昼・総'].append(len_days) output['平日夜・総'].append(len_nights) output['平日昼・基準内'].append(len_acc_days) output['平日夜・基準内'].append(len_acc_nights) output['平日昼基準率'].append(acc_stats_days) output['平日夜基準率'].append(acc_stats_nights) return output # - # ### 月データを学習 def get_trans_model(x_train, # pandas data frame y_train, # pandas series trans_month, # list of month strings trans_month_file, # list of month file strings test_month, # test month string out_dir): # output directory def init_model(): return ExtraTreesRegressor(n_estimators=700, n_jobs=-1, max_depth=11, max_features='auto', criterion='mae', random_state=700, warm_start=True) # base model base_model = init_model() base_model.fit(x_train, y_train) # database preparation x_trans_all, y_trans_all = [], [] for trans_m, trans_f in zip(trans_month, trans_month_file): df_trans = read_df(trans_f, trans_m, energy) x_trans, y_trans = mask_out(df_trans.drop(columns=['target']).iloc[:-1], df_trans['target'].iloc[1:], trans_m) x_trans_all.append(x_trans) y_trans_all.append(y_trans) x_trans_all, y_trans_all = pd.concat(x_trans_all), pd.concat(y_trans_all) # pick up transfer data from data base start = time.time() x_picked, y_picked = [], [] for idx, row in x_trans_all.iterrows(): pred = base_model.predict(row.values.reshape(1, -1))[0] true = y_trans_all.loc[idx + timedelta(hours=1)] if true != 0 and np.abs(pred - true) / true <= 0.025: 
x_picked.append(row) y_picked.append(true) elapsed_time = time.time() - start print(test_month, energy, 'transfer elapsed time: ', elapsed_time, 's') print(' number of data picked: ', len(x_picked)) # concat train and picked data x_train_trans = np.vstack((x_train, x_picked)) y_train_trans = np.append(y_train, y_picked) # dump tt_data = np.hstack((x_train_trans, y_train_trans.reshape(-1, 1))) np.savetxt(os.path.join(out_dir, energy + '_学習_転移.csv'), tt_data, delimiter=',') print('Length of train and trans data: ', len(x_train_trans)) # test model model = init_model() # learn 1 hour later target start = time.time() model.fit(x_train_trans, y_train_trans) elapsed_time = time.time() - start print(test_month, energy, 'train elapsed time: ', elapsed_time, 's') # feature importance get_importance_figure(model, energy, x_train.columns) return model # + total_acc = [] for i, (m, f) in enumerate(zip(month, month_file)): #if i == 0: continue if m != '18年1月': continue print(m) # create output dir out_dir = os.path.join(base_out_dir, m) if not os.path.exists(out_dir): os.makedirs(out_dir) # set train, trans, test files train_month, train_month_file = month[i - 1], month_file[i - 1] test_month, test_month_file = m, f trans_month, trans_month_file = [x for x in month if x not in [train_month, test_month]], \ [x for x in month_file if x not in [train_month_file, test_month_file]] print('train: ', train_month, train_month_file) print('test: ', test_month, test_month_file) print('trans: ', trans_month, trans_month_file) # base train model for energy in energies: # train and test data df_train = read_df(train_month_file, train_month, energy) df_test = read_df(test_month_file, test_month, energy) # x, y x_train, y_train = mask_out(df_train.drop(columns=['target']).iloc[:-1], df_train['target'].iloc[1:], train_month) x_test, y_test = mask_out(df_test.drop(columns=['target']).iloc[:-1], df_test['target'].iloc[1:], test_month) # concat train and test x_both, y_both = pd.concat([x_train, 
x_test]), pd.concat([y_train, y_test]) # get transfer model model = get_trans_model(x_train, y_train, trans_month, trans_month_file, test_month, out_dir) # current test index current_idx = x_test.index[0] # select data of past 30 days for the current idx preds = [] for idx, row in x_test.iterrows(): if idx.day != current_idx.day: # update index of training data begin_idx = max(idx - timedelta(days=30), x_train.index[0]) end_idx = idx - timedelta(days=1) print(begin_idx, end_idx) # update training data x_train = x_both.loc[[x for x in x_both.index if begin_idx <= x < end_idx]] y_train = y_both.loc[[x + timedelta(hours=1) for x in x_train.index]] print(x_train.shape, y_train.shape) # update transfer model model = get_trans_model(x_train, y_train, trans_month, trans_month_file, test_month, out_dir) # update current test index current_idx = idx # predict preds.append(model.predict(row.values.reshape(1, -1))[0]) # save preds and test preds = pd.Series(data=preds, index=y_test.index, name='preds') result = pd.concat([preds, y_test], axis=1) result.to_csv(os.path.join(out_dir, energy + '.csv'), encoding='shift-jis') # accuracy output = {'設備名': [], '平日昼・総': [], '平日夜・総': [], '平日昼・基準内': [], '平日夜・基準内': [], '平日昼基準率': [], '平日夜基準率': []} output = get_output(result, output, energy, test_month) print(test_month, energy, output) # save accuracy accs = pd.DataFrame(output) accs.to_csv(os.path.join(out_dir, energy + '_acc.csv'), index=False, encoding='shift-jis') total_acc.append(accs) total_acc = pd.concat(total_acc) total_acc.to_csv(os.path.join(base_out_dir, 'total_acc.csv'), index=False, encoding='shift-jis') # -
learning_30_days_transfer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('../') import base import matplotlib.pyplot as plt import numpy as np import os import fnmatch root_paths1 = ['../../Data/Raw/']*8 root_paths2 = ['../../Data/Raw/']*7 tags1 = ['20190923-174441', '20191001-112411', '20191002-115000', \ '20191106-170809', '20191107-183857', '20191108-145125', \ '20191118-161325', '20191127-122008'] tags2 = ['20190923-171112', '20191001-115127', '20191002-111038',\ '20191107-174215', '20191108-142321', '20191118-171209',\ '20191127-132223'] rat_fs1 = base.MultiDaysBeaconPosition(root_paths1, tags1, has_beacon = False, has_metadata= False) rat_fs2 = base.MultiDaysBeaconPosition(root_paths2, tags2, has_beacon = False, has_metadata = False) rat_fs1.multisession_statistics tags1 # ## Crawling function to generate tags automatically for each animal on file. 
def Make_tags (rat_ID="FS11"): """Takes in animal ID and searches through a specific subfolder in storage 2 to generate time tags for each session PARAMS ------------ rat_ID : str name of animal Returns ------------ list of str of roots and list of str of tags - date time """ animal= '//10.153.170.3/storage2/fabian/data/project/'+rat_ID dirnames = [ name for name in os.listdir(animal) if os.path.isdir(os.path.join(animal, name)) ] roots = [k for k in dirnames if 'BP' in k] filter_tags = [x[-15:] for x in roots] filter_roots = [animal + s for s in roots] return filter_roots, filter_tags, root, tags = Make_tags ("FS11") root, tags = Make_tags ("FS11/") #print (root) #print( tags) rat_fs11 = base.MultiDaysBeaconPosition(root, tags, has_beacon = False, has_metadata= False) # + rat_fs11.num_sessions # - rat_fs11.median_speed rat_fs11.get_trials rat_fs11 = base.MultiDaysBeaconPosition(root, tags, has_beacon = False, has_metadata= False) rat_fs11.dataset_list[0:5] data = rat_fs11.trial_list[1] #data = rat_fs11.get_trials for trial in data: plt.scatter(trial[:,1], trial[:,2], s=1) data = base.BeaconPosition(root[1], tags[1], has_beacon = True, has_metadata = True) data # + for trial in data.trial_list: plt.scatter(trial[:,1], trial[:,2], s=1) # - data = base.MultiDaysBeaconPosition(root, tags, has_beacon = False, has_metadata= False) for i in range(len(data.trial_list[0])): straightness_moment = path_analysis.straightness_moment_time(data.trial_list[0][i][:,:3], before_time=2) straightness_time = path_analysis.straightness_over_time(data.trial_list[0][i][:,:3], before_time=2) if data.trial_visible[0][i]: plt.plot(straightness_time[0], c='magenta') else: plt.plot(straightness_time[0],c='cyan')
refactoring/demo_notebook/Session_stats_plotting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt def warn(*args, **kwargs): pass import warnings warnings.warn = warn from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import StratifiedShuffleSplit train = pd.read_csv('x_train.csv') test = pd.read_csv('x_test.csv') # - test['is_click']=0 # + # Swiss army knife function to organize the data def encode(train, test): le = LabelEncoder().fit(train.is_click) labels = le.transform(train.is_click) # encode species strings classes = list(le.classes_) # save column names for submission test_ids = test.impression_id # save test ids for submission train = train.drop(['is_click', 'impression_id'], axis=1) test = test.drop(['impression_id'], axis=1) return train, labels, test, test_ids, classes train, labels, test, test_ids, classes = encode(train, test) train.head(1) # - # + sss = StratifiedShuffleSplit(labels, 10, test_size=0.2, random_state=23) for train_index, test_index in sss: X_train, X_test = train.values[train_index], train.values[test_index] y_train, y_test = labels[train_index], labels[test_index] # -
Online Competitions/Analytics_Vidhya_Hackathons/WNS Analytics Wizard 2019/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from ContNoRegret.Domains import unitbox from ContNoRegret.LossFunctions import QuadraticLossFunction from ContNoRegret.NLopt import NLoptProblem import matplotlib.pyplot as plt # %matplotlib inline dom = unitbox(2) objective = QuadraticLossFunction(dom, [0,0], np.array([[2,0.5], [0.5,1]]), 0) etas = 500*1/np.arange(1,501) prob = NLoptProblem(dom, objective) actions = np.array(prob.run_minimization(etas, 1)) # - i=0 plt.plot(actions[15:25,i,0], actions[15:25,i,1]) from ContNoRegret.D
ipython_notebooks/NLtest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="512d64e04ad0533dd72ca6bcbca4882a7f289956" # # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import pandas as pd import numpy as np import matplotlib.pyplot as plt import time from datetime import date, datetime, timedelta from sklearn.metrics import accuracy_score # - market_train_df = pd.read_csv("dataset/market_train_df.csv") news_train_df = pd.read_csv("dataset/news_train_df.csv") market_train_df = market_train_df.astype({ 'time': 'datetime64', 'assetCode': 'object', 'assetName': 'category', 'universe': 'float64', 'volume': 'float64', 'close': 'float64', 'open': 'float64', 'returnsClosePrevRaw1': 'float64', 'returnsOpenPrevRaw1': 'float64', 'returnsClosePrevMktres1': 'float64', 'returnsOpenPrevMktres1': 'float64', 'returnsClosePrevRaw10': 'float64', 'returnsOpenPrevRaw10': 'float64', 'returnsClosePrevMktres10': 'float64', 'returnsOpenPrevMktres10': 'float64', 'returnsOpenNextMktres10': 'float64' }) news_train_df = news_train_df.astype({ 'time': 'datetime64', 'sourceTimestamp': 'datetime64', 'firstCreated': 'datetime64', 'sourceId': 'object', 'headline': 'object', 'urgency': 'int8', 'takeSequence': 'int16', 'provider': 'category', 'subjects': 'category', 'audiences': 'category', 'bodySize': 'int32', 'companyCount': 'int8', 'headlineTag': 'object', 'marketCommentary': 'bool', 'sentenceCount': 'int16', 'wordCount': 'int32', 'assetCodes': 'category', 'assetName': 'category', 'firstMentionSentence': 'int16', 'relevance': 'float32', 'sentimentClass': 'int8', 'sentimentNegative': 'float32', 'sentimentNeutral': 'float32', 'sentimentPositive': 'float32', 'sentimentWordCount': 'int32', 'noveltyCount12H': 'int16', 'noveltyCount24H': 'int16', 'noveltyCount3D': 'int16', 
'noveltyCount5D': 'int16', 'noveltyCount7D': 'int16', 'volumeCounts12H': 'int16', 'volumeCounts24H': 'int16', 'volumeCounts3D': 'int16', 'volumeCounts5D': 'int16', 'volumeCounts7D': 'int16' }) print(market_train_df.dtypes) print(news_train_df.dtypes) market_train_orig = market_train_df news_train_orig = news_train_df # + [markdown] _uuid="f8d8dd40919f2270d5332427eb43f57f0df2e452" # # Merge & clean data # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" start_date = date(2015,1,1) # (market_train, _) = env.get_training_data() market_train_df = market_train_orig.copy() news_train_df = news_train_orig.copy() print('Market train shape: ',market_train_df.shape) print('News train shape: ', news_train_df.shape) # Sort data market_train_df = market_train_df.sort_values('time') market_train_df['date'] = market_train_df['time'].dt.date # Fill nan market_train_fill = market_train_df column_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10'] column_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10'] for i in range(len(column_raw)): market_train_fill[column_market[i]] = market_train_fill[column_market[i]].fillna(market_train_fill[column_raw[i]]) market_train_orig = market_train_orig.sort_values('time') news_train_orig = news_train_orig.sort_values('time') market_train_df = market_train_orig.copy() news_train_df = news_train_orig.copy() del market_train_orig del news_train_orig market_train_df = market_train_df.loc[market_train_df['time'].dt.date>=start_date] news_train_df = news_train_df.loc[news_train_df['time'].dt.date>=start_date] market_train_df['close_open_ratio'] = np.abs(market_train_df['close']/market_train_df['open']) threshold = 0.5 print('In %i lines price increases by 50%% or more in a day' %(market_train_df['close_open_ratio']>=1.5).sum()) print('In %i lines price decreases by 50%% or more in a day' 
%(market_train_df['close_open_ratio']<=0.5).sum()) market_train_df = market_train_df.loc[market_train_df['close_open_ratio'] < 1.5] market_train_df = market_train_df.loc[market_train_df['close_open_ratio'] > 0.5] market_train_df = market_train_df.drop(columns=['close_open_ratio']) from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from nltk.corpus import stopwords column_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10'] column_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10'] #the top hundred words. vectorizer = CountVectorizer(max_features=1000, stop_words={"english"}) #we do this with TF-IDF. X = vectorizer.fit_transform(news_train_df['headline'].values) tf_transformer = TfidfTransformer(use_idf=False).fit(X) X_train_tf = tf_transformer.transform(X) X_train_vals = X_train_tf.mean(axis=1) del vectorizer del X del X_train_tf #mean tf-idf score for news article. 
d = pd.DataFrame(data=X_train_vals) news_train_df['tf_score'] = d market_train_df = market_train_df.loc[market_train_df['time'].dt.date>=start_date] news_train_df = news_train_df.loc[news_train_df['time'].dt.date>=start_date] #add indicator features market_train_df['rolling_average_close_mean'] = market_train_df.groupby('assetCode')['close'].transform('mean') market_train_df['rolling_average_vol_mean'] = market_train_df.groupby('assetCode')['volume'].transform('mean') market_train_df['rolling_average_close_std'] = market_train_df.groupby('assetCode')['close'].transform('std') market_train_df['rolling_average_vol_std'] = market_train_df.groupby('assetCode')['volume'].transform('std') #some more refined instruments market_train_df['moving_average_7_day'] = market_train_df.groupby('assetCode')['close'].transform(lambda x: x.rolling(window=7).mean()) ewma = pd.Series.ewm market_train_df['ewma'] = market_train_df.groupby('assetCode')['close'].transform(lambda x : ewma(x, span=30).mean()) market_train_df['moving_average_7_day'] = market_train_df['moving_average_7_day'].fillna(0) market_train_df['ewma'] = market_train_df['ewma'].fillna(0) for i in range(len(column_raw)): market_train_df[column_market[i]] = market_train_df[column_market[i]].fillna(market_train_df[column_raw[i]]) print('Removing outliers ...') column_return = column_market + column_raw + ['returnsOpenNextMktres10'] orig_len = market_train_df.shape[0] for column in column_return: market_train_df = market_train_df.loc[market_train_df[column]>=-2] market_train_df = market_train_df.loc[market_train_df[column]<=2] new_len = market_train_df.shape[0] rmv_len = np.abs(orig_len-new_len) print('There were %i lines removed' %rmv_len) print('Removing strange data ...') orig_len = market_train_df.shape[0] market_train_df = market_train_df[~market_train_df['assetCode'].isin(['PGN.N','EBRYY.OB'])] #market_train_df = market_train_df[~market_train_df['assetName'].isin(['Unknown'])] new_len = market_train_df.shape[0] rmv_len 
= np.abs(orig_len-new_len) print('There were %i lines removed' %rmv_len) # Function to remove outliers def remove_outliers(data_frame, column_list, low=0.02, high=0.98): for column in column_list: this_column = data_frame[column] quant_df = this_column.quantile([low,high]) low_limit = quant_df[low] high_limit = quant_df[high] data_frame[column] = data_frame[column].clip(lower=low_limit, upper=high_limit) return data_frame columns_outlier = ['takeSequence', 'bodySize', 'sentenceCount', 'wordCount', 'sentimentWordCount', 'firstMentionSentence','noveltyCount12H',\ 'noveltyCount24H', 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H', 'volumeCounts24H',\ 'volumeCounts3D','volumeCounts5D','volumeCounts7D'] print('Clipping news outliers ...') news_train_df = remove_outliers(news_train_df, columns_outlier) asset_code_dict = {k: v for v, k in enumerate(market_train_df['assetCode'].unique())} drop_columns = [col for col in news_train_df.columns if col not in ['sourceTimestamp', 'urgency', 'takeSequence', 'bodySize', 'companyCount', 'sentenceCount', 'firstMentionSentence', 'relevance','firstCreated', 'assetCodes']] columns_news = ['firstCreated','relevance','sentimentClass','sentimentNegative','sentimentNeutral', 'sentimentPositive','noveltyCount24H','noveltyCount7D','volumeCounts24H','volumeCounts7D','assetCodes','sourceTimestamp', 'assetName','audiences', 'urgency', 'takeSequence', 'bodySize', 'companyCount', 'sentenceCount', 'firstMentionSentence','time', 'tf_score'] def data_prep(market_df,news_df): market_df['date'] = market_df.time.dt.date market_df['close_to_open'] = market_df['close'] / market_df['open'] market_df.drop(['time'], axis=1, inplace=True) news_df = news_df[columns_news] news_df['sourceTimestamp']= news_df.sourceTimestamp.dt.hour news_df['firstCreated'] = news_df.firstCreated.dt.date news_df['assetCodesLen'] = news_df['assetCodes'].map(lambda x: len(eval(x))) news_df['assetCodes'] = news_df['assetCodes'].map(lambda x: list(eval(x))[0]) 
news_df['asset_sentiment_count'] = news_df.groupby(['assetName', 'sentimentClass'])['time'].transform('count') news_df['len_audiences'] = news_train_df['audiences'].map(lambda x: len(eval(x))) kcol = ['firstCreated', 'assetCodes'] news_df = news_df.groupby(kcol, as_index=False).mean() market_df = pd.merge(market_df, news_df, how='left', left_on=['date', 'assetCode'], right_on=['firstCreated', 'assetCodes']) del news_df # market_df['assetCodeT'] = market_df['assetCode'].map(asset_code_dict) market_df = market_df.drop(columns = ['firstCreated','assetCodes','assetName']).fillna(0) # print(market_df.count) return market_df print('Merging data ...') market_train_df = data_prep(market_train_df, news_train_df) market_train_df = market_train_df.loc[market_train_df['date']>=start_date] market_train = market_train_df del market_train_df # + _uuid="05f7dec87a5f584037d01bd19ed066f5fd944aaf" market_train.describe().round(3) # + [markdown] _uuid="f7be6288dd6462e2d0b7d5eae2fa70d748a82a1b" # # Prepare model # + _uuid="5dfa1843dfece6fccfca91896ef85a332b55e3e6" # cat_cols = ['assetCode'] # num_cols = ['volume', # 'close', # 'open', # 'returnsClosePrevRaw1', # 'returnsOpenPrevRaw1', # 'returnsClosePrevMktres1', # 'returnsOpenPrevMktres1', # 'returnsClosePrevRaw10', # 'returnsOpenPrevRaw10', # 'returnsClosePrevMktres10', # 'returnsOpenPrevMktres10'] cat_cols = ['assetCodeT'] num_cols = ['volume', 'close', 'open', 'returnsClosePrevRaw1', 'returnsOpenPrevRaw1', 'returnsClosePrevMktres1', 'returnsOpenPrevMktres1', 'returnsClosePrevRaw10', 'returnsOpenPrevRaw10', 'returnsClosePrevMktres10', 'returnsOpenPrevMktres10', 'close_to_open', 'rolling_average_close_mean', 'rolling_average_vol_mean', 'rolling_average_close_std', 'ewma', 'rolling_average_close_std', 'sourceTimestamp', 'urgency', 'companyCount', 'takeSequence', 'bodySize', 'sentenceCount', 'moving_average_7_day','relevance', 'sentimentClass', 'sentimentNegative', 'sentimentNeutral', 'sentimentPositive', 
'noveltyCount24H','noveltyCount7D','volumeCounts24H','volumeCounts7D','assetCodesLen', 'asset_sentiment_count', 'len_audiences', 'tf_score'] # + _uuid="e108339134e95473b4a983237d58adb64c3ef64a" from sklearn.model_selection import train_test_split train_indices, val_indices = train_test_split(market_train.index.values, test_size=0.25, random_state=42) # + [markdown] _uuid="f51d00dc43857b446ae4a24b3718753f09040fd5" # # Handling categorical variables # + _uuid="301a65b834d8614a914883d49be1860550174f06" def encode(encoder, x): len_encoder = len(encoder) try: id = encoder[x] except KeyError: id = len_encoder return id encoders = [{} for cat in cat_cols] # for i, cat in enumerate(cat_cols): # print('encoding %s ...' % cat, end=' ') # encoders[i] = {l: id for id, l in enumerate(market_train.loc[train_indices, cat].astype(str).unique())} # market_train[cat] = market_train[cat].astype(str).apply(lambda x: encode(encoders[i], x)) # print('Done') market_train['assetCodeT'] = market_train['assetCode'].astype(str).apply(lambda x: encode(encoders[0], x)) embed_sizes = [len(encoder) + 1 for encoder in encoders] #+1 for possible unknown assets # + [markdown] _uuid="eae09951757845ad5bdf08e004e6477513ed739a" # # Define NN Architecture # + _uuid="f8661c00b3363e610d2ed00fdb86b507a71a41c3" from keras.models import Model from keras.layers import Input, Dense, Embedding, Concatenate, Flatten, BatchNormalization, Dropout from keras.losses import binary_crossentropy categorical_inputs = [] for cat in cat_cols: categorical_inputs.append(Input(shape=[1], name=cat)) categorical_embeddings = [] for i, cat in enumerate(cat_cols): categorical_embeddings.append(Embedding(embed_sizes[i], 10)(categorical_inputs[i])) #categorical_logits = Concatenate()([Flatten()(cat_emb) for cat_emb in categorical_embeddings]) categorical_logits = Flatten()(categorical_embeddings[0]) categorical_logits = Dense(32,activation='relu')(categorical_logits) categorical_logits = Dropout(0.5)(categorical_logits) 
categorical_logits = BatchNormalization()(categorical_logits) categorical_logits = Dense(32,activation='relu')(categorical_logits) numerical_inputs = Input(shape=(len(num_cols),), name='num') numerical_logits = numerical_inputs numerical_logits = BatchNormalization()(numerical_logits) numerical_logits = Dense(128,activation='relu')(numerical_logits) numerical_logits = Dropout(0.5)(numerical_logits) numerical_logits = BatchNormalization()(numerical_logits) numerical_logits = Dense(128,activation='relu')(numerical_logits) numerical_logits = Dense(64,activation='relu')(numerical_logits) logits = Concatenate()([numerical_logits,categorical_logits]) logits = Dense(64,activation='relu')(logits) out = Dense(1, activation='sigmoid')(logits) model = Model(inputs = categorical_inputs + [numerical_inputs], outputs=out) model.compile(optimizer='adam',loss=binary_crossentropy) # + _uuid="fd6ac097d55ae74d4b7e660958c3bb00e8bc5569" from keras.utils import plot_model plot_model(model, to_file='TS-NN-model.png') # + _uuid="fecfb9cfcf401170cc8e010d38ff08c11d7387bb" from IPython.display import SVG from keras.utils.vis_utils import model_to_dot SVG(model_to_dot(model).create(prog='dot', format='svg')) # - model.summary() # + def get_input(market_train, indices): X_num = market_train.loc[indices, num_cols].values X = {'num':X_num} for cat in cat_cols: X[cat] = market_train.loc[indices, cat_cols].values y = (market_train.loc[indices,'returnsOpenNextMktres10'] >= 0).values r = market_train.loc[indices,'returnsOpenNextMktres10'].values u = market_train.loc[indices, 'universe'] d = market_train.loc[indices, 'date'] return X,y,r,u,d # r, u and d are used to calculate the scoring metric X_train,y_train,r_train,u_train,d_train = get_input(market_train, train_indices) X_valid,y_valid,r_valid,u_valid,d_valid = get_input(market_train, val_indices) # - # # Train NN model # + from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau check_point = 
ModelCheckpoint('models/TS-NN.hdf5',verbose=True, save_best_only=True) early_stop = EarlyStopping(patience=5,verbose=True) model.fit(X_train,y_train.astype(int), validation_data=(X_valid,y_valid .astype(int)), epochs=5, verbose=True, callbacks=[early_stop,check_point]) # - # # Evaluation of Validation Set # distribution of confidence that will be used as submission model.load_weights('models/TS-NN.hdf5') confidence_valid = model.predict(X_valid)[:,0]*2 - 1 print(accuracy_score(confidence_valid>0,y_valid)) plt.hist(confidence_valid, bins='auto') plt.title("validation confidence") plt.show() y_train y_valid r_valid plt.hist(r_valid, bins='auto') plt.title("validation returnsOpenNextMktres10") plt.show() # calculation of actual metric that is used to calculate final score r_valid = r_valid.clip(-1,1) # get rid of outliers. Where do they come from?? x_t_i = confidence_valid * r_valid * u_valid data = {'day' : d_valid, 'x_t_i' : x_t_i} df = pd.DataFrame(data) df.head() x_t = df.groupby('day').sum().values.flatten() mean = np.mean(x_t) std = np.std(x_t) score_valid = mean / std print(score_valid)
TS-NN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # This folder works towards creating a 'right' csv-file of the Fusus al-hikam. Its origin is a TSV-file created by <NAME> (accessed february 11) which has since then seen slight changes. This TSV-file is in turn based on the Lakhnawi edition of Ibn Arabi's Fusus al-hikam. # # ### Aim # My aim to create a clean and ready file `fusus.csv`. It is to strictly encapsulate the text of Fusus al-hikam, ready to be machine-readable. As the text is groomed and cleaned, `fusus.csv` is undergoing change, with the underlying code not preserved. This is especially true for columns right of 'word'. The column 'short' is meant to be the cleanest. It has been stripped of diacritics, shaddas, tatwīl, spaces, and punctuation. The columns to the right are annotations on it. # # ### Credits # Text by Ibn Arabi (finished in the year 1233). Edition by Nizam al-Din Ahmad al-Husayni al-Lakhnawi (Beirut: 2013). First conversion by <NAME> (January 2021). Finalization by <NAME> (February 2021). Mostly relying on Pandas, with some thankful use of a list of Arabic characters by <NAME> and <NAME> for CLTK. 
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import arabicABC as abc

# NOTE(review): the read_csv below is commented out, so `fusus` must already
# exist in the notebook session before this cell runs — confirm before re-running.
# fusus = pd.read_csv('fusus.csv')
fusus = fusus.fillna('')
fusus.head()

# Exploring a specific punctuation mark
punctMark = fusus[fusus['haspunct']=='١‐']
punctMark

# Moving punctuation from `short` to `After` and deleting from `haspunct` (aim is to get `haspunct` empty and delete it)
def SetPunctuation(row):
    """Move the punctuation mark of the row-th hit in `punctMark` out of `short`.

    If the punctuation is the trailing two characters of `short`, write it into
    `punctAfter`, strip it from `short`, and blank `haspunct` — all in the
    global `fusus` dataframe (mutated in place).
    """
    position = punctMark.iloc[row]
    punct = position.haspunct
    short = position.short
    # only act when the mark really sits at the end of the cleaned word
    if short[-2:] == punct:
        fusus.iloc[position.name, fusus.columns.get_loc('punctAfter')] = punct
        fusus.iloc[position.name, fusus.columns.get_loc('short')] = short.replace(punct,'')
        fusus.iloc[position.name, fusus.columns.get_loc('haspunct')] = ""

# +
# Uncomment to set function in motion for all cases of the particular punctuation
# for i in range(0,len(punctMark)):
#     SetPunctuation(i)
# -

# Check results and possibly adjust details

# look at one
# fusus.iloc[26816]
# look at line
# fusus[(fusus['page']==273) & (fusus.line==8)]
# change one value
# fusus.iloc[6041, fusus.columns.get_loc("word")] = '[٣١ظهر]'

fusus[(fusus['page']==47) & (fusus.line==7)]

fusus.iloc[6041].word[2:]

# fusus.iloc[6040, fusus.columns.get_loc("word")]

# Which punctuation remains? We find out below
puncs = fusus[fusus['haspunct']!=''].haspunct.unique().tolist()
# keep only marks that are not part of a bracketed editorial annotation
puncsLess = []
for punc in puncs:
    if '[' not in punc and ']' not in punc:
        puncsLess.append(punc)
print(puncsLess)

# frequency of each remaining mark
for punc in puncsLess:
    times = fusus[fusus['haspunct']==punc].shape[0]
    print(punc + " " + str(times))

# Some more functionality to check what is going on
fusus.haspunct.str.contains(pat='').value_counts()

fusus[fusus.haspunct.str.contains(pat='١١')]

# indices of the words immediately preceding each punctuation hit
lijst = punctMark.index.tolist()
lijst = [x-1 for x in lijst]
lijst

# first need to clean up `[zahr -#]`, which refer to MS Qunawi, and only then the poetry metre. Poetry can consist of two words (majzūʾ in front). 29817/304-9 needs to be split. Folio 53 is counted twice (in the MS).

# These are the words right before poetry starts: indicate meter.
fusus.iloc[lijst]

# clear `haspunct` on bracketed folio markers containing ظهر ("verso")
allwahj = fusus[(fusus.word.str.contains(pat='ظهر')) & (fusus.word.str.contains(pat='\[')) & (fusus.word.str.contains(pat='\]'))].index.tolist()
for i in allwahj:
    fusus.iloc[i, fusus.columns.get_loc("haspunct")] = ""

# the same markers with وجه ("recto"), for inspection
fusus[(fusus.word.str.contains(pat='وجه')) & (fusus.word.str.contains(pat='\[')) & (fusus.word.str.contains(pat='\]'))]

fusus.head()

# Still have to get the QunawiMS annotated to the first next word
fusus[(fusus.QunawiMS.notna())]

# forward-fill the manuscript folio reference down to following words
fusus.QunawiMS = fusus.QunawiMS.ffill()

wahj = fusus[fusus.word=='ظهر]'].index.tolist()
for i in wahj:
    print(str(fusus.iloc[[i-1,i]].word.tolist()[0]) + str(fusus.iloc[[i-1,i]].word.tolist()[1])+ " at " + str(fusus.iloc[[i-1,i]].index.tolist()[0]))

fusus[(fusus['page']==47) & (fusus.line==7)]

fusus[(fusus.haspunct.str.contains(pat='\['))]

# drop rows whose word became empty during cleaning
empty = fusus[fusus.word==""].index.tolist()
fusus.drop(fusus.index[empty], inplace=True)
fusus.shape
fusust-text-laboratory/FususReworking.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.7 64-bit (''codes'': conda)'
#     name: python37764bitcodesconda65f9a13fd3f3408ea5234ca36b878887
# ---

# + [markdown] id="vjUDH_q9piRv"
# # **Chapter 2**

# + [markdown] id="R65DgLVlp2Rz"
# #**Task1:**
#
# http://rosalind.info/problems/ba2a/
#
# Implement MotifEnumeration

# + colab={"base_uri": "https://localhost:8080/"} id="8T3wep0YqoU6" outputId="3c5b5831-4591-4893-f846-7a3eb8224b92"
def neighbour(pattern, mismatch, words):
    """Add to the set `words` every k-mer within `mismatch` substitutions of `pattern`."""
    if mismatch == 0:
        words.add(pattern)
    else:
        bases = ['A', 'T', 'C', 'G']
        for i in range(len(pattern)):
            for j in range(len(bases)):
                # substitute one position (possibly with the same base, which
                # keeps patterns with fewer than `mismatch` changes in the set)
                new_pattern = pattern[:i] + bases[j] + pattern[i+1:]
                if mismatch <= 1:
                    words.add(new_pattern)
                else:
                    neighbour(new_pattern, mismatch-1, words)


def MotifEnumeration(dna, k, d):
    """Return all (k, d)-motifs: k-mers appearing in every string of `dna`
    with at most d mismatches. Order of the result is unspecified."""
    patterns = []
    for n in range(len(dna)):
        pattern = set()  # candidate k-mers contributed by this one string
        for i in range(len(dna[n]) - k + 1):
            kmerspattern = set()
            neighbour(dna[n][i:i + k], d, kmerspattern)
            for words in kmerspattern:
                pattern.add(words)
        for j in pattern:
            patterns.append(j)
    # keep only candidates contributed by every string (each string adds a
    # candidate at most once, so count == len(dna) means "in all strings")
    motifpattern = []
    for element in patterns:
        if patterns.count(element) == len(dna):
            motifpattern.append(element)
    motifpattern = list(set(motifpattern))
    return motifpattern


if __name__ == "__main__":
    k, d = 3, 1
    dna = ['ATTTGGC', 'TGCCTTA', 'CGGTATC', 'GAAAATT']
    # with open("rosalind_ba2a.txt", "r") as f:
    #     k, d = map(int, f.readline().strip().split())
    #     dna = [line.strip() for line in f]
    result = MotifEnumeration(dna, k, d)
    for i in result:
        print(i)

# + [markdown] id="DtXMt8y7p2nF"
# #**Task2:**
#
# http://rosalind.info/problems/ba2b/
#
# Find a Median String

# + colab={"base_uri": "https://localhost:8080/"} id="5ZZZ_pEiqo2M" outputId="f996d331-22f2-4483-e365-f129f980fee3"
from itertools import product


def pattern_generate(k):
    """Return every DNA k-mer (4**k strings), bases enumerated in A,C,G,T order."""
    bases = ['A', 'C', 'G', 'T']
    results = []
    for c in product(bases, repeat=k):
        word = ''.join(c)
        results.append(word)
    return results


def min_hamming_distance(pattern, string):
    """Minimum Hamming distance between `pattern` and any window of `string`."""
    min_distance = len(pattern)
    for i in range(len(string) - len(pattern) + 1):
        distance = sum([1 for j in range(len(pattern)) if pattern[j] != string[i:i+len(pattern)][j]])
        if distance < min_distance:
            min_distance = distance
    return min_distance


def find_median_string(k, dna):
    """Print one k-mer minimizing the summed min-distance to all strings in `dna`."""
    pattern = pattern_generate(k)
    distance_from_pattern = {}
    min_dist = len(dna) * len(pattern)
    for i in pattern:
        sum_dist = 0
        for j in range(len(dna)):
            sum_dist += min_hamming_distance(i, dna[j])
        distance_from_pattern[i] = sum_dist
        min_dist = min(sum_dist, min_dist)
    for i in distance_from_pattern.keys():
        if distance_from_pattern[i] == min_dist:
            print(i)
            break


if __name__ == "__main__":
    k = 3
    dna = ['AAATTGACGCAT', 'GACGACCACGTT', 'CGTCAGCGCCTG', 'GCTGAGCACCGG', 'AGTACGGGACAG']
    # with open("rosalind_ba2b.txt", "r") as f:
    #     k = int(f.readline().strip())
    #     dna = [line.strip() for line in f]
    find_median_string(k, dna)

# + [markdown] id="Ey16rphrp2wW"
# #**Task3:**
#
# http://rosalind.info/problems/ba2c/
#
# Find a Profile-most Probable k-mer in a String

# + colab={"base_uri": "https://localhost:8080/"} id="whNGNGCiqpTB" outputId="7c178bee-5051-4c8a-e57d-799188bcafbd"
def probability_of_kmer(string, matrix):
    """Probability of `string` under a profile matrix with rows A, C, G, T."""
    probability = 1.0
    for i in range(len(string)):
        if string[i] == 'A':
            probability *= matrix[0][i]
        elif string[i] == 'C':
            probability *= matrix[1][i]
        elif string[i] == 'G':
            probability *= matrix[2][i]
        elif string[i] == 'T':
            probability *= matrix[3][i]
    return probability


def profile_most_probable_kmer(string, k, matrix):
    """Return the k-mer of `string` with the highest profile probability.
    Ties go to the earliest occurrence (dict preserves insertion order)."""
    kmer_probability = {}
    for i in range(len(string)-k+1):
        kmer_probability[string[i:i+k]] = probability_of_kmer(string[i:i+k], matrix)
    max_key = max(kmer_probability, key=kmer_probability.get)
    return max_key


if __name__ == "__main__":
    string = 'ACCTGTTTATTGCCTAAGTTCCGAACAAACCCAATATAGCCCGAGGGCCT'
    k = 5
    matrix = [[0.2, 0.2, 0.3, 0.2, 0.3],
              [0.4, 0.3, 0.1, 0.5, 0.1],
              [0.3, 0.3, 0.5, 0.2, 0.4],
              [0.1, 0.2, 0.1, 0.1, 0.2]]
    # with open("rosalind_ba2c.txt", "r") as f:
    #     string = f.readline().strip()
    #     k = int(f.readline().strip())
    #     matrix = [[float(l) for l in line.strip().split()] for line in f]
    print(profile_most_probable_kmer(string, k, matrix))

# + [markdown] id="5299Fp-8p25H"
# #**Task4:**
#
# http://rosalind.info/problems/ba2d/
#
# Implement GreedyMotifSearch

# + colab={"base_uri": "https://localhost:8080/"} id="Wlw8JIH9qp8N" outputId="0d5c92c3-312c-4cd4-f5f3-95dd8a27dd50"
def probability_of_kmer(string, matrix):
    """Probability of `string` under a profile matrix with rows A, C, G, T."""
    probability = 1.0
    for i in range(len(string)):
        if string[i] == 'A':
            probability *= matrix[0][i]
        elif string[i] == 'C':
            probability *= matrix[1][i]
        elif string[i] == 'G':
            probability *= matrix[2][i]
        elif string[i] == 'T':
            probability *= matrix[3][i]
    return probability


def profile_most_probable_kmer(string, k, matrix):
    """Return the profile-most-probable k-mer of `string` (earliest wins ties)."""
    kmer_probability = {}
    for i in range(len(string)-k+1):
        kmer_probability[string[i:i+k]] = probability_of_kmer(string[i:i+k], matrix)
    max_key = max(kmer_probability, key=kmer_probability.get)
    return max_key


def score(Motifs):
    """Total number of letters disagreeing with each column's majority base."""
    count_score = 0
    for i in range(len(Motifs[0])):
        j = [motif[i] for motif in Motifs]
        count_score += (len(j) - max(j.count("A"), j.count("C"), j.count("T"), j.count("G")))
    return count_score


def greedy_motif_search(Dna, k, t):
    """GreedyMotifSearch (no pseudocounts): grow a motif per string, keep the
    lowest-scoring collection over all starting k-mers of Dna[0]."""
    BestMotifs = [dna[:k] for dna in Dna]
    for k_mer in [Dna[0][i:i+k] for i in range(len(Dna[0])-k+1)]:
        Motifs = [k_mer]
        for i in range(1, t):
            motifs = Motifs[:i]
            # build a raw-frequency profile of the motifs chosen so far
            matrix = []
            for base in ["A", "C", "G", "T"]:
                row = []
                for j in range(k):
                    col = [x[j] for x in motifs]
                    #print(col)
                    row.append(col.count(base)/len(motifs))
                matrix.append(row)
            Motifs.append(profile_most_probable_kmer(Dna[i], k, matrix))
        if score(Motifs) < score(BestMotifs):
            BestMotifs = Motifs
    return BestMotifs


if __name__ == "__main__":
    k, t = 3, 5
    Dna = ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']
    # with open("rosalind_ba2d.txt", "r") as f:
    #     k, t = map(int, f.readline().strip().split())
    #     Dna = [line.strip() for line in f]
    BestMotifs = greedy_motif_search(Dna, k ,t)
    print("\n".join(BestMotifs))

# + [markdown] id="S4JUedttp3BB"
# #**Task5:**
#
# http://rosalind.info/problems/ba2e/
#
# Implement GreedyMotifSearch with Pseudocounts

# + colab={"base_uri": "https://localhost:8080/"} id="E5G0RzIjqqeW" outputId="66f3f39c-a4de-40aa-d4f7-b3156189a989"
def probability_of_kmer(string, matrix):
    """Probability of `string` under a profile matrix with rows A, C, G, T."""
    probability = 1.0
    for i in range(len(string)):
        if string[i] == 'A':
            probability *= matrix[0][i]
        elif string[i] == 'C':
            probability *= matrix[1][i]
        elif string[i] == 'G':
            probability *= matrix[2][i]
        elif string[i] == 'T':
            probability *= matrix[3][i]
    return probability


def profile_most_probable_kmer(string, k, matrix):
    """Return the profile-most-probable k-mer of `string` (earliest wins ties)."""
    kmer_probability = {}
    for i in range(len(string)-k+1):
        kmer_probability[string[i:i+k]] = probability_of_kmer(string[i:i+k], matrix)
    max_key = max(kmer_probability, key=kmer_probability.get)
    return max_key


def score(Motifs):
    """Total number of letters disagreeing with each column's majority base."""
    count_score = 0
    for i in range(len(Motifs[0])):
        j = [motif[i] for motif in Motifs]
        count_score += (len(j) - max(j.count("A"), j.count("C"), j.count("T"), j.count("G")))
    return count_score


def greedy_motif_search(Dna, k, t):
    """GreedyMotifSearch with Laplace pseudocounts (one per base per column)."""
    BestMotifs = [dna[:k] for dna in Dna]
    for k_mer in [Dna[0][i:i+k] for i in range(len(Dna[0])-k+1)]:
        Motifs = [k_mer]
        for i in range(1, t):
            motifs = Motifs[:i]
            matrix = []
            for base in ["A", "C", "G", "T"]:
                row = []
                for j in range(k):
                    col = [x[j] for x in motifs]
                    #print(col)
                    # FIX: Laplace pseudocount is (count+1)/(n+4); the original
                    # `col.count(base)+1/len(motifs)+4` parsed as
                    # count + (1/n) + 4 because of operator precedence.
                    row.append((col.count(base) + 1) / (len(motifs) + 4))
                matrix.append(row)
            Motifs.append(profile_most_probable_kmer(Dna[i], k, matrix))
        if score(Motifs) < score(BestMotifs):
            BestMotifs = Motifs
    return BestMotifs


if __name__ == "__main__":
    k, t = 3, 5
    Dna = ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']
    # with open("rosalind_ba2e.txt", "r") as f:
    #     k, t = map(int, f.readline().strip().split())
    #     Dna = [line.strip() for line in f]
    BestMotifs = greedy_motif_search(Dna, k ,t)
    print("\n".join(BestMotifs))

# + [markdown] id="bn85caP6qS0J"
# # **Task 6**
#
#
# http://rosalind.info/problems/ba2f/
#
# Implement RandomizedMotifSearch

# + colab={"base_uri": "https://localhost:8080/"} id="L1ojRDL9qcQo" outputId="b5b7c475-1f35-4c1d-fd7a-8529d2de20cb"
import random


def probability_of_kmer(string, matrix):
    """Probability of `string` under a profile matrix with rows A, C, G, T."""
    probability = 1.0
    for i in range(len(string)):
        if string[i] == 'A':
            probability *= matrix[0][i]
        elif string[i] == 'C':
            probability *= matrix[1][i]
        elif string[i] == 'G':
            probability *= matrix[2][i]
        elif string[i] == 'T':
            probability *= matrix[3][i]
    return probability


def profile_most_probable_kmer(string, k, matrix):
    """Return the profile-most-probable k-mer of `string` (earliest wins ties)."""
    kmer_probability = {}
    for i in range(len(string)-k+1):
        kmer_probability[string[i:i+k]] = probability_of_kmer(string[i:i+k], matrix)
    max_key = max(kmer_probability, key=kmer_probability.get)
    return max_key


def get_score(Motifs):
    """Total number of letters disagreeing with each column's majority base."""
    count_score = 0
    for i in range(len(Motifs[0])):
        j = [motif[i] for motif in Motifs]
        count_score += (len(j) - max(j.count("A"), j.count("C"), j.count("T"), j.count("G")))
    return count_score


def get_random_motifs(Dna, k, t):
    """Pick one uniformly random k-mer from each string of `Dna`."""
    motifs = []
    kmer_index = [random.randint(0, len(Dna[0]) - k) for i in range(len(Dna))]
    motifs = [Dna[i][j:j+k] for i, j in enumerate(kmer_index)]
    return motifs


def create_profile_with_pseudocounts(motifs):
    """Profile matrix (rows A, C, G, T) with Laplace pseudocounts."""
    matrix = []
    for base in ["A", "C", "G", "T"]:
        row = []
        for j in range(len(motifs[0])):
            col = [x[j] for x in motifs]
            #print(col)
            row.append(float(col.count(base) + 1) / float(len(motifs) + 4))
        matrix.append(row)
    #print(matrix)
    return matrix


def randomized_motif_search(Dna, k, t):
    """One run of RandomizedMotifSearch; returns (motifs, score) at convergence."""
    Motifs = get_random_motifs(Dna, k, t)
    BestMotifs = Motifs
    BestScore = get_score(BestMotifs)
    while True:
        Profile = create_profile_with_pseudocounts(Motifs)
        #print(Profile)
        Motifs = [profile_most_probable_kmer(Dna[i], k, Profile) for i in range(len(Dna))]
        current_score = get_score(Motifs)
        if current_score < BestScore:
            BestMotifs = Motifs
            BestScore = current_score
        else:
            return BestMotifs, BestScore


if __name__ == "__main__":
    # with open("rosalind_ba2f.txt", "r") as f:
    #     k, t = map(int, f.readline().strip().split())
    #     Dna = [line.strip() for line in f]
    k, t = 8, 5
    Dna = ['CGCCCCTCTCGGGGGTGTTCAGTAAACGGCCA', 'GGGCGAGGTATGTGTAAGTGCCAAGGTGCCAG', 'TAGTACCGAGACCGAAAGAAGTATACAGGCGT', 'TAGATCAAGTTTCAGGTGCACGTCGGTGAACC', 'AATCCACCAGCTCCACGTGCAATGTTGGCCTA']
    allScores = []
    allMotifs = []
    for i in range(1000):
        motifs, scores = randomized_motif_search(Dna, k, t)
        allScores.append(scores)
        allMotifs.append(motifs)
    BestMotifs = allMotifs[allScores.index(min(allScores))]
    print("\n".join(BestMotifs))

# + [markdown] id="x_GBeFUcqqtv"
# # **Task 7**
#
# http://rosalind.info/problems/ba2g/
#
# Implement GibbsSampler

# + colab={"base_uri": "https://localhost:8080/"} id="c_Xj6qmBqrA_" outputId="56b65267-d086-467c-b718-4b0801a290be"
import random
from random import randint


def probability_of_kmer(string, matrix):
    """Probability of `string` under a profile matrix with rows A, C, G, T."""
    probability = 1.0
    for i in range(len(string)):
        if string[i] == 'A':
            probability *= matrix[0][i]
        elif string[i] == 'C':
            probability *= matrix[1][i]
        elif string[i] == 'G':
            probability *= matrix[2][i]
        elif string[i] == 'T':
            probability *= matrix[3][i]
    return probability


def profile_most_probable_kmer(string, k, matrix):
    """Return the profile-most-probable k-mer of `string` (earliest wins ties)."""
    kmer_probability = {}
    for i in range(len(string)-k+1):
        kmer_probability[string[i:i+k]] = probability_of_kmer(string[i:i+k], matrix)
    max_key = max(kmer_probability, key=kmer_probability.get)
    return max_key


def get_score(Motifs):
    """Total number of letters disagreeing with each column's majority base."""
    count_score = 0
    for i in range(len(Motifs[0])):
        j = [motif[i] for motif in Motifs]
        count_score += (len(j) - max(j.count("A"), j.count("C"), j.count("G"), j.count("T")))
    return count_score


def get_random_motifs(Dna, k, t):
    """Pick one uniformly random k-mer from each string of `Dna`."""
    motifs = []
    kmer_index = [random.randint(0, len(Dna[0]) - k) for i in range(len(Dna))]
    motifs = [Dna[i][j:j+k] for i, j in enumerate(kmer_index)]
    return motifs


def create_profile_with_pseudocounts(motifs):
    """Profile matrix (rows A, C, G, T) with Laplace pseudocounts.

    FIX: the denominator was the global `t + 4`, which is undefined on import
    and wrong after GibbsSampler pops a motif; it must depend on the motifs
    actually passed in, matching the RandomizedMotifSearch version above.
    """
    matrix = []
    for base in ["A", "C", "G", "T"]:
        row = []
        for j in range(len(motifs[0])):
            col = [x[j] for x in motifs]
            #print(col)
            row.append(float(col.count(base) + 1) / float(len(motifs) + 4))
        matrix.append(row)
    #print(matrix)
    return matrix


def GibbsSampler(Dna, k, t, N):
    """N iterations of Gibbs sampling; returns (best motifs, best score).

    FIX: `BestMotifs = Motifs` aliased the working list, which is mutated in
    place by pop/insert below, so the saved "best" drifted away from
    BestScore. Snapshot with list(...) instead.
    """
    Motifs = get_random_motifs(Dna, k, t)
    BestMotifs = list(Motifs)
    BestScore = get_score(BestMotifs)
    for j in range(N):
        i = random.randint(0, t-1)
        Motifs.pop(i)
        Profile = create_profile_with_pseudocounts(Motifs)
        #print(Profile)
        motif = profile_most_probable_kmer(Dna[i], k, Profile)
        #print(motif)
        Motifs.insert(i, motif)
        current_score = get_score(Motifs)
        if current_score < BestScore:
            BestMotifs = list(Motifs)  # FIX: snapshot, do not alias
            BestScore = current_score
    return BestMotifs, BestScore


if __name__ == "__main__":
    k, t, N = 8, 5, 100
    Dna = ['CGCCCCTCTCGGGGGTGTTCAGTAAACGGCCA', 'GGGCGAGGTATGTGTAAGTGCCAAGGTGCCAG', 'TAGTACCGAGACCGAAAGAAGTATACAGGCGT', 'TAGATCAAGTTTCAGGTGCACGTCGGTGAACC', 'AATCCACCAGCTCCACGTGCAATGTTGGCCTA']
    # with open("rosalind_ba2g.txt", "r") as f:
    #     k, t, N = map(int, f.readline().strip().split())
    #     Dna = [f.readline().strip() for _ in range(t)]
    allScores = []
    allMotifs = []
    min_score = 9999999999999999
    best_motifs = None
    for i in range(20):
        motifs, scores = GibbsSampler(Dna, k, t, N)
        if scores < min_score:
            min_score = scores
            best_motifs = motifs
    print(*best_motifs, sep='\n')

# + [markdown] id="RYBtPwFqK0t3"
# # **Task 8**
#
# http://rosalind.info/problems/ba2h/
#
# Implement DistanceBetweenPatternAndStrings

# + colab={"base_uri": "https://localhost:8080/"} id="z5cPD6ZmK06I" outputId="939d5629-6394-4460-ad1a-38c62fa86edc"
def hammingDist(genome, kmer):
    """Minimum Hamming distance between `kmer` and any window of `genome`."""
    dist = []
    for i in range(len(genome) - len(kmer) + 1):
        word = genome[i:i+len(kmer)]
        dist.append(sum(x != y for x, y in zip(word, kmer)))
    return min(dist)


def DistanceBetweenPatternAndStrings(pattern, dna):
    """Sum over `dna` of the minimum Hamming distance from `pattern` to each string."""
    distance = 0
    for seq in dna:
        tempDist = [hammingDist(pattern, seq[i : i + len(pattern)]) for i in range(len(seq) - len(pattern) + 1)]
        minTempDist = min(tempDist)
        distance += minTempDist
    return distance


if __name__ == "__main__":
    pattern = 'AAA'
    dna = ['TTACCTTAAC', 'GATATCTGTC', 'ACGGCGTTCG', 'CCCTAAAGAG', 'CGTCAGAGGT']
    # with open(filename, 'r') as f:
    #     pattern = f.readline().rstrip()
    #     dna = f.readline().rstrip().split()
    print(DistanceBetweenPatternAndStrings(pattern, dna))

# + id="vqBWLt9QbLpX"
Chapter_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## What does time.time() measure?

# What does the `time.time()` function exactly measure?

# The time since a predefined date and time set by the operating system.

# ## Measuring time I
#
# In the lecture slides, you saw how the `time.time()` function can be loaded and used to assess the time required to perform a basic mathematical operation.
#
# Now, you will use the same strategy to assess two different methods for solving a similar problem: calculate the sum of squares of all the positive integers from 1 to 1 million (1,000,000).
#
# Similar to what you saw in the video, you will compare two methods; one that uses brute force and one more mathematically sophisticated.
#
# In the function `formula`, we use the standard formula
#
# ![formula](https://i.imgur.com/9j3BELw.png)
#
# where N=1,000,000.
#
# In the function `brute_force` we loop over each number from 1 to 1 million and add it to the result.
#
# Instructions
#
# 1. Calculate the result of the problem using the `formula()` function.
# 2. Print the time required to calculate the result using the `formula()` function.
# 3. Calculate the result of the problem using the `brute_force()` function.
# 4. Print the time required to calculate the result using the `brute_force()` function.

# +
# Define functions
def brute_force(N):
    """Sum of squares of 1..N by explicit iteration."""
    res = 0
    UL = N+1
    for i in range(1, UL):
        # FIX: the original `res += i^2` used `^` (bitwise XOR), not squaring,
        # so brute_force disagreed with formula() for every N.
        res += i**2
    return res

def formula(N):
    """Closed-form sum of squares: N(N+1)(2N+1)/6 (returns a float)."""
    return N*(N+1)*(2*N+1)/6


# +
# Import time
import time

N = 1000000

# Calculate the result of the problem using formula() and print the time required
fm_start_time = time.time()
first_method = formula(N)
print('Time using formula: {} sec'.format(time.time() - fm_start_time))

# Calculate the result of the problem using brute_force() and print the time required
sm_start_time = time.time()
second_method = brute_force(N)
print('Time using brute force: {} sec'.format(time.time() - sm_start_time))
# -

# ## Measuring time II
#
# As we discussed in the lectures, **in the majority of cases**, a list comprehension is faster than a for loop.
#
# In this demonstration, you will see a case where a list comprehension and a for loop have so small difference in efficiency that choosing either method will perform this simple task instantly.
#
# In the list `words`, there are random words downloaded from the Internet. We are interested to create another list called `listlet` in which we only keep the words that start with the letter `b`.
#
# In case you are not familiar with dealing with strings in Python, each string has the `.startswith()` attribute, which returns a True/False statement whether the string starts with a specific letter/phrase or not.
#
# Instructions
#
# 1. Assign the time **before** the execution of the list comprehension
# 2. Assign the time **after** the execution of the list comprehension
# 3. Assign the time **before** the execution of the for loop
# 4.
# Assign the time **after** the execution of the for loop

words = ['<html>', '<head><title>404 Not Found</title></head>', '<body>', '<center><h1>404 Not Found</h1></center>', '<hr><center>nginx</center>', '</body>', '</html>']

# +
# Store the time before the execution
start_time = time.time()

# Execute the operation
letlist = [wrd for wrd in words if wrd.startswith('b')]

# Store and print the difference between the start and the current time
total_time_lc = time.time() - start_time
print(f'Time using list comprehension: {total_time_lc} sec')

# +
# Store the time before the execution
start_time = time.time()

# Execute the operation
letlist = []
for wrd in words:
    if wrd.startswith('b'):
        letlist.append(wrd)

# Print the difference between the start and the current time
total_time_fl = time.time() - start_time
print(f'Time using for loop: {total_time_fl} sec')
# -

# ## Row selection: loc[] vs iloc[]
#
# A big part of working with DataFrames is to locate specific entries in the dataset. You can locate rows in two ways:
#
# - By a specific value of a column (feature).
# - By the index of the rows (index). In this exercise, we will focus on the second way.
#
# If you have previous experience with pandas, you should be familiar with the `.loc` and `.iloc` indexers, which stands for 'location' and 'index location' respectively. In most cases, the indices will be the same as the position of each row in the Dataframe (e.g. the row with index 13 will be the 14th entry).
#
# While we can use both functions to perform the same task, we are interested in which is the most efficient in terms of speed.
#
# Instructions
#
# 1. Store the indices of the first 1000 rows in the `row_nums`.
# 2. Use the `.loc[]` indexer to select the first 1000 rows of `poker_hands`, and record the times before and after that operation.
# 3. Print the time it took to select the rows.
# 4. Use the `.iloc[]` indexer with `row_nums` to select the first 1000 rows of the DataFrame `poker_hands`, and print how much time it took (as the difference between the time after the selection and the time before the selection)
# 5. If you need to select specific **rows** of a DataFrame, which function is more efficient, it terms of speed?

# +
# Import pandas
import pandas as pd
# FIX: numpy is used below (np.random.randint) but was never imported in this
# notebook, which raised a NameError in the random-row-selection cell.
import numpy as np

# Import dataset
poker_hands = pd.read_csv('poker_hands.csv')

# +
# Define the range of rows to select: row_nums
row_nums = range(0, 1000)

# Select the rows using .loc[] and row_nums and record the time before and after
loc_start_time = time.time()
rows = poker_hands.loc[row_nums]
loc_end_time = time.time()

# Print the time it took to select the rows using .loc[]
print(f'Time using .loc[]: {loc_end_time - loc_start_time} sec')

# +
# Select the rows using .iloc[] and row_nums and record the time before and after
iloc_start_time = time.time()
rows = poker_hands.iloc[row_nums]
iloc_end_time = time.time()

# Print the time it took to select the rows using .iloc
print(f'Time using .iloc[]: {iloc_end_time - iloc_start_time} sec')
# -

# ## Column selection: .iloc[] vs by name
#
# In the previous exercise, you saw how the `.loc[]` and `.iloc[]` functions can be used to locate specific _rows_ of a DataFrame (based on the index). Turns out, the `.iloc[]` function performs a lot faster (~ 2 times) for this task!
#
# Another important task is to find the faster function to select the targeted _features (columns)_ of a DataFrame. In this exercise, we will compare the following:
#
# - using the index locator `.iloc()`.
# - using the names of the columns.
#
# While we can use both functions to perform the same task, we are interested in which is the most efficient in terms of speed.
#
# In this exercise, you will continue working with the poker data which is stored in `poker_hands`. Take a second to examine the structure of this DataFrame by calling `poker_hands.head()` in the console!
#
# Instructions
#
# 1. Use the `.iloc` indexer to select the first, third, fourth, sixth and seventh column (`'S1'`, `'S2'`, `'R2'`, `'R3'`, `'S4'`) of the DataFrame `poker_hands` by their _index_ and find the time it took.
# 2. Select the first, third, fourth, sixth and seventh column (`'S1'`, `'S2'`, `'R2'`, `'R3'`, `'S4'`) of the DataFrame `poker_hands` by their _names_ and time this operation.
# 3. If you need to select a specific **column** (or **columns**) of a DataFrame, which function is more efficient, it terms of speed?

# +
# Use .iloc to select the first, fourth, fifth, seventh and eighth column and record the times before and after
iloc_start_time = time.time()
cols = poker_hands.iloc[:,[0,3,4,6,7]]
iloc_end_time = time.time()

# Print the time it took
print(f'Time using .iloc[]: {iloc_end_time - iloc_start_time} sec')

# +
# Use simple column selection to select the first, fourth, fifth, seventh and eighth column and record the times before and after
names_start_time = time.time()
cols = poker_hands[['S1', 'S2', 'R2', 'R3', 'S4']]
names_end_time = time.time()

# Print the time it took
print(f'Time using selection by name: {names_end_time - names_start_time} sec')
# -

# ## Random row selection
#
# In this exercise, you will compare the two methods described for selecting random rows (entries) with replacement in a `pandas` DataFrame:
#
# - The built-in `pandas` function `.random()`
# - The `NumPy` random integer number generator `np.random.randint()`
#
# Generally, in the fields of statistics and machine learning, when we need to train an algorithm, we train the algorithm on the 75% of the available data and then test the performance on the remaining 25% of the data.
#
# For this exercise, we will randomly sample the 75% percent of all the played poker hands available, using each of the above methods, and check which method is more efficient in terms of speed.
#
# Instructions
#
# 1. Randomly select 75% of the rows of the poker dataset using the `np.random.randint()` method.
# 2. Randomly select 75% of the rows of the poker dataset using the `.sample()` method. Make sure to specify the axis correctly!
# 3. Between `np.random.randint()` and `.sample()`, which one is faster when selecting random rows from a pandas DataFrame?

# +
# Extract number of rows in dataset
N = poker_hands.shape[0]

# Select and time the selection of the 75% of the dataset's rows
rand_start_time = time.time()
poker_hands.iloc[np.random.randint(low=0, high=N, size=int(0.75 * N))]
print(f'Time using Numpy: {time.time() - rand_start_time} sec')
# -

# Select and time the selection of the 75% of the dataset's rows using sample()
samp_start_time = time.time()
poker_hands.sample(int(0.75 * N), axis=0, replace=True)
print(f'Time using .sample: {time.time() - samp_start_time} sec')

# ## Random column selection
#
# In the previous exercise, we examined two ways to select random rows from a `pandas` DataFrame. We can use the same functions to randomly select columns in a `pandas` DataFrame.
#
# To randomly select 4 columns out of the poker dataset, you will use the following two functions:
#
# - The built-in `pandas` function `.random()`
# - The `NumPy` random integer number generator `np.random.randint()`
#
# Instructions
#
# 1. Randomly select 4 columns from the `poker_hands` dataset using `np.random.randint()`.
# 2. Randomly select 4 columns from the `poker_hands` dataset using the `.sample()` method.
# 3.

# +
# Extract number of columns in dataset
D = poker_hands.shape[1]

# Select and time the selection of 4 of the dataset's columns using NumPy
np_start_time = time.time()
poker_hands.iloc[:,np.random.randint(low=0, high=D, size=4)]
print(f"Time using NymPy's random.randint(): {time.time() - np_start_time} sec")
# -

# Select and time the selection of 4 of the dataset's columns using pandas
pd_start_time = time.time()
poker_hands.sample(4, axis=1)
print(f"Time using panda's .sample(): {time.time() - pd_start_time} sec")
writing_efficient_code_with_pandas/1_selecting_columns_and_rows_efficiently.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Projeto Parte 1 - Pipeline de Treinamento

# Nome: <NAME> (<EMAIL>)

# ### 1. Data extraction

# Loads a dataset with product data from a specified path available in the environment variable DATASET_PATH.

# +
import numpy as np
import pandas as pd
import os
import sklearn

# environment variables
# !cat .env
# -

# original data frame, read from the path in DATASET_PATH
DATASET_PATH=os.getenv("DATASET_PATH")
df_original = pd.read_csv(DATASET_PATH)

# ## 2. Data formatting

# Processes the dataset to use it for training and validation.

# +
# X -> concatenated_tags
# y -> category
# drop null values, select the columns for X and y, and check the
# class distribution per category (printed as percentages)
df = df_original[['category','concatenated_tags','price','seller_id','weight']].dropna()
print("Tamanho do data frame: "+ str(len(df)))
series = df['category'].value_counts()
print(series/len(df)*100)
# -

# extract target_names and y (y = integer index of each row's category)
target_names = [category for category in df.category.unique()]
y = [target_names.index(category) for category in df.category]
y = pd.DataFrame(y)

# extract the values for X (only the tags text is used as a feature)
feature_names = ['concatenated_tags']
X = df[feature_names]

# +
from sklearn.model_selection import train_test_split

# stratified train/test split so class proportions are preserved
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,  # 10% held out for test
                                                    stratify=y, random_state=420)  # fixed seed
print("Tamanho para treino: "+ str(len(y_train)))
dy_train = pd.DataFrame([target_names[v] for v in y_train[0]], columns= ['category'])
print(dy_train['category'].value_counts()/len(dy_train)*100)
print("Tamanho para teste: "+ str(len(y_test)))
dy_test = pd.DataFrame([target_names[v] for v in y_test[0]], columns= ['category'])
print(dy_test['category'].value_counts()/len(dy_test)*100)

# +
from sklearn.feature_extraction.text import CountVectorizer

# bag-of-words with bigrams
count_vect = CountVectorizer(ngram_range=(1, 2),max_features=None,max_df=0.5)
X_train_counts = count_vect.fit_transform(X_train['concatenated_tags'])
X_train_counts.shape

# +
from sklearn.feature_extraction.text import TfidfTransformer

# tf-idf frequency reweighting
tfidf_transformer = TfidfTransformer(norm='l1',use_idf=True)
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
X_train_tfidf.shape
# -

# ## 3. Modeling

# Specifies a model to handle the categorization problem.

# +
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.naive_bayes import ComplementNB
from sklearn import metrics

# open (truncate) the metrics file
METRICS_PATH=os.getenv("METRICS_PATH")
with open(METRICS_PATH, "w") as f:
    print("MÉTRICAS NO CROSS-VALIDATION (n_splits=5)", file=f)

# cross validation with n_splits = 5
skf = StratifiedKFold(n_splits=5)
acc_dt = []
# take one fold for training and another for validation over X and y
for tr_idx, vl_idx in skf.split(X_train_tfidf, y_train):
    X_train_f, X_valid_f = X_train_tfidf[tr_idx], X_train_tfidf[vl_idx]
    y_train_f, y_valid_f = y_train.iloc[tr_idx], y_train.iloc[vl_idx]
    clf = ComplementNB(alpha=0.01).fit(X_train_f, y_train_f.values.ravel())
    y_pred_f = clf.predict(X_valid_f)
    acc_dt.append(accuracy_score(y_valid_f, y_pred_f))
    with open(METRICS_PATH, "a") as f:
        print(metrics.classification_report(y_valid_f, y_pred_f,target_names=target_names), file=f)
print("Acurácia média =",np.mean(acc_dt)*100," %.")
# -

# ## 4. Model validation

# Generates metrics about the model accuracy (precision, recall, F1, etc.) for each category and exports them to a specified path available in the environment variable METRICS_PATH.

# +
# train the model on the full training set
clfAll = ComplementNB(alpha=0.01).fit(X_train_tfidf, y_train.values.ravel())

# compute the classifier's error on the test data
# (transform only — the vectorizer/transformer were fitted on training data)
X_test_counts = count_vect.transform(X_test['concatenated_tags'])
X_test_tfidf = tfidf_transformer.transform(X_test_counts)
y_pred = clfAll.predict(X_test_tfidf)
print("Acurácia =",accuracy_score(y_test, y_pred)*100," %.")

# +
from sklearn import metrics

# metrics
print(metrics.classification_report(y_test, y_pred,target_names=target_names))

# +
# append the test metrics to the metrics file
METRICS_PATH=os.getenv("METRICS_PATH")
with open(METRICS_PATH, "a") as f:
    print("MÉTRICAS NO TESTE", file=f)
    print(metrics.classification_report(y_test, y_pred,target_names=target_names), file=f)
# -

# display the generated file
# !cat {METRICS_PATH}

# ## 5. Model exportation

# Exports a candidate model to a specified path available in the environment variable MODEL_PATH.

# +
# pipeline used to export the selected model
from sklearn.pipeline import Pipeline

model = Pipeline([
    ('vect', CountVectorizer(ngram_range=(1, 2),max_features=None,max_df=0.5)),
    ('tfidf', TfidfTransformer(norm='l1',use_idf=True)),
    ('clf', ComplementNB(alpha=0.01)),
])
# train the pipeline on all the training-set data
model.fit(X_train['concatenated_tags'], y_train.values.ravel())
# predict on the test data (expression value shown by the notebook)
predicted = model.predict(X_test['concatenated_tags'])
np.mean(predicted == y_test.values.ravel())

# +
import pickle

# write the .pkl file
MODEL_PATH=os.getenv("MODEL_PATH")
with open(MODEL_PATH, 'wb') as file:
    pickle.dump(model, file)

# +
# test the .pkl file by reloading it and scoring on the test set
with open(MODEL_PATH, 'rb') as file:
    pickle_model = pickle.load(file)
score = pickle_model.score(X_test['concatenated_tags'], y_test)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(X_test['concatenated_tags'])

# +
### HYPERPARAMETER OPTIMIZATION
### SECTION COMMENTED OUT BECAUSE OF ITS EXECUTION TIME
#from sklearn.model_selection import GridSearchCV
#model = Pipeline([
#    ('vect', CountVectorizer()),
#    ('tfidf', TfidfTransformer()),
#    ('clf', ComplementNB()),
#])
#parameters = {
#    'vect__max_df': (0.5, 0.75, 1.0),
#    'vect__max_features': (None, 5000, 10000, 50000),
#    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
#    'tfidf__use_idf': (True, False),
#    'tfidf__norm': ('l1', 'l2'),
#    'clf__alpha': (0.01, 0.1, 0.5, 1.0, 10.0)
#    #'clf__penalty': ('l2', 'elasticnet'),
#    #'clf__n_iter': (10, 50, 80),
#}
#setting up the grid search
#gs=GridSearchCV(model,parameters,n_jobs=-1,cv=5)
##fitting gs to training data
#gs.fit(X_train['concatenated_tags'], y_train.values.ravel())
#print(gs.best_params_)
##print(gs_clf.best_score_)
#print(gs.best_score_)
#print("Grid scores on development set:")
#print()
#means = gs.cv_results_['mean_test_score']
#stds = gs.cv_results_['std_test_score']
#for mean, std, params in zip(means, stds, gs.cv_results_['params']):
#    print("%0.3f (+/-%0.03f) for %r"
#          % (mean, std * 2, params))
#print()
### RESULT OF THE HYPERPARAMETER TUNING
#{'clf__alpha': 0.01, 'tfidf__norm': 'l1', 'tfidf__use_idf': True, 'vect__max_df': 0.5, 'vect__max_features': None, 'vect__ngram_range': (1, 2)}
#0.8778290660120106
training/trainer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %cd /home/jovyan/work from chordifier.KeyboardRenderer import KeyboardRenderer from chordifier.experiments.KeyboardFactory import make # - keyboard = make([0, 0, 0, 0, 2, 2, 0, 0, 0, 0]) KeyboardRenderer(keyboard, 'Handheld keyboard, operated with thumbs').present() from chordifier.algorithm.Pipeline import Pipeline from chordifier.utils import vector # + # %%time global PARAMETERS PARAMETERS = { # general 'keyboard': keyboard, 'characters': 35, # sequencer 'filename': "dataset/ngram.json", 'length': 3, 'samples': 1000, # dynamics 'x_y_ratio': 1.5, 'stiffness': vector(1.8, 1.3, 1.2, 1.1, 2, 2, 1.1, 1.2, 1.3, 1.8), # pruner 'priority': vector(18000, 13000, 12000, 11000, 14500, 14516, 11008, 12004, 13002, 18001), 'finger_priorities': 0.1, 'average_offsets': 0.5, 'deviation_of_offsets': 2, # evaluator 'distances_travelled': 1, 'chord_difficulties': 1, # optimizer 'best': True, 'generations': 200, 'population_size': 300, 'mate_probability': 0.5, 'mutate_probability': 0.2, 'mutate_independent_probability': 0.1, 'select_tournament_size': 30, } global pipeline_best pipeline_best = Pipeline(PARAMETERS) pipeline_best.prepare() global result_best result_best = pipeline_best.optimize() # + # %%time PARAMETERS["best"] = False global pipeline_worst pipeline_worst = Pipeline(PARAMETERS) pipeline_worst.prepare() global result_worst result_worst = pipeline_worst.optimize() # + from bokeh.plotting import figure, show, ColumnDataSource from bokeh.palettes import Spectral4 def plot(title, logs): source = ColumnDataSource({ 'generation': [item['gen'] for item in logs], 'maximum': [item['maximum'] for item in logs], 'average': [item['average'] for item in logs], 'minimum': [item['minimum'] for item in logs], 'deviation': [item['deviation'] for item in logs], }) 
tooltips = [ ('generation', '@generation'), ('maximum', '@maximum'), ('average', '@average'), ('minimum', '@minimum'), ('deviation', '@deviation'), ] f = figure(title=title, tools='pan,wheel_zoom,reset', height=600, sizing_mode='stretch_width', tooltips=tooltips) f.line('generation', 'maximum', line_width=4, source=source, legend='maximum ', color=Spectral4[2]) f.line('generation', 'average', line_width=4, source=source, legend='average ', color=Spectral4[0]) f.line('generation', 'minimum', line_width=4, source=source, legend='minimum ', color=Spectral4[1]) f.circle('generation', 'maximum', fill_color='white', size=8, source=source, color=Spectral4[2]) f.circle('generation', 'average', fill_color='white', size=8, source=source, color=Spectral4[0]) f.circle('generation', 'minimum', fill_color='white', size=8, source=source, color=Spectral4[1]) show(f) # - plot('Optimizing for best chords', pipeline_best.optimizer.logs[1]) plot('Optimizing for worst chords', pipeline_worst.optimizer.logs[1]) # + import pprint pp = pprint.PrettyPrinter(indent=4) print("=== Results ===") print("--- best ---") print("fitness: ", -pipeline_best.optimizer.winner.keys[0].wvalues[0]) print("mapping:") pp.pprint(result_best) print("--- worst ---") print("fitness: ", pipeline_worst.optimizer.winner.keys[0].wvalues[0]) print("mapping:") pp.pprint(result_worst) # -
lab/optimize_thumb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 연속형 수치 데이터의 이산형화(Discretize) # # * 수치적 데이터를 개별적인 구간으로 나눈다. # * 이산형화를 통하여 수치 특성을 범주형 데이터로 변환할 수 있다. # * 이산형화(discretization)은 연속형 변수를 2개 이상의 범주(category)를 가지는 변수로 변환해주는 것을 말한다. # import numpy as np import pandas as pd age = np.array([[6], [12], [20], [36], [65] ]) # ### np.digitize() # 수치적 특성을 여러 임계값에 따라 나누는 방법 # np.digitize(age, bins=[20,30,64]) # * bins 매개변수의 입력값은 각 구간의 왼쪽 경계값이다. # * [~ 20), [20, 30), [30, 64) , [64 ~ ) 4개 구간으로 나뉜다. # * right = True를 설정하여 변경할 수 있다. np.digitize(age, bins=[20,30,64], right=True) # ### np.where(condition, factor1, factor2, ...)를 이용한 연속형 변수의 이산형화 # x = np.arange(100) np.where(x >= x.mean(), 'high', 'low') # ## Binning # # 수치형 데이터를 범주형 데이터로 변환할 수 있다. 숫자데이터를 카테고리화 하는 기능을 가지고 있다. # * pd.cut() : 나누는 구간의 경계값을 지정하여 구간을 나눈다. # * pd.qcut() : 구간 경계선을 지정하지 않고 데이터 갯수가 같도록 지정한 수의 구간으로 나눈다. # # ages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32] bins = [18, 25, 35, 60, 100] # ### pd.cut() - 동일 길이로 나누어서 범주 만들기(equal-length buckets categorization) # # * pd.cut()함수는 인자로 (카테고리화 숫자데이터, 구간의 구분값)를 넣어 쉽게 카테고리화 할 수 있다. # * pd.cut()함수로 잘린 데이터는 카테고리 자료형 Series로 반환되게 된다. # # ages가 5개의 구간 분값에 의해 4구간의 카테고리 자료형으로 반환된다. # 18 ~ 25 / 25 ~ 35 / 35 ~ 60 / 60 ~ 100 이렇게 총 4구간 cats = pd.cut(ages,bins) cats # cats.codes 를 통해, ages의 각 성분이 몇번째 구간에 속해있는지 정수index처럼 표시되는 것을 알 수 있다. # 20은 0=첫번째 구간에, 27은 1=두번째 구간에 속한다는 것을 알 수 있다. cats.codes # cats.value_counts() 를 통해서, 값x 각 구간에 따른 성분의 갯수를 확인할 수 있다. # value_counts()는 카테고리 자료형(Series)에서 각 구간에 속한 성분의 갯수도 파악할 수 있다. cats.value_counts() # pd.cut()을 호출시, labes = [ 리스트]형식으로 인자를 추가하면 각 카테고리명을 직접 지정해 줄 수 있다. # + group_names = ["Youth", "YoungAdult", "MiddleAged", "Senior"] pd.cut(ages, bins, labels= group_names) # - # #### pd.cut() 구간의 개수로 나누기 # 2번째 인자에서 각 구간 구분값(bins)이 리스트형식으로 넣어줬던 것을 –> # 나눌 구간의 갯수만 입력해준다. 
# (성분의 최소값 ~ 최대값를 보고 동일 간격으로 구간을 나눈다.) # + import numpy as np data = np.random.rand(20) data # - # 20개의 data성분에 대해, 동일한 길이의 구간으로 4개를 나누었고, # 기준은 소수2번째 자리까지를 기준으로 한다. pd.cut(data, 4, precision = 2 ) # ### pd.qcut() - 동일 개수로 나누어서 범주 만들기 (equal-size buckets categorization) # # pandas에서는 qcut이라는 함수도 제공한다. # * 지정한 갯수만큼 구간을 정의한다. # * pd.cut() 함수는 최대값 쵯소값만 고려해서 구간을 나눈 것에 비해 # * pd.qcut() 함수는 데이터 분포를 고려하여 각 구간에 동일한 양의 데이터가 들어가도록 분위 수를 구분값으로 구간을 나누는 함수다. data2 = np.random.randn(100) data2 cats = pd.qcut(data2, 4) # * cats = pd.qcut(data2, 4)를 통해 4개의 구간을 나눈다. # * 최소값<—>최대값 사이를 4등분 하는 것이 아니라, 분포까지 고려해서 4분위로 나눈 다음, 구간을 결정하게 된다. # * cut함수와 달리, 각 구간의 길이가 동일하다고 말할 수 없다. cats # ### sklearn.preprocessing.Binarizer() # sklearn.preprocessing.Binarizer()를 사용해서 연속형 변수를 특정 기준값 이하(equal or less the threshold)이면 '0', 특정 기준값 초과(above the threshold)이면 '1'의 두 개의 값만을 가지는 변수로 변환하는 방법 from sklearn.preprocessing import Binarizer # 20을 기준으로 데이터를 2개 범주로 나눈다. binarizer = Binarizer(20) binarizer.fit_transform(age) # ## version 차이 # # * sklearn 0.23 이상 버전에서 sklearn.preprocessing.KBinsDiscretizer() 사용 가능하다. # * 더 낮은 버전에서 아래의 코드는 에러가 날 수 있다. # ### sklearn.preprocessing.KBinsDiscretizer() # 연속적인 특성값을 여러 구간으로 나누어 준다. 나눌 구간 개수를 지정한다. # # * encode : # * 기본값은 'onehot'으로 one-hot encode된 희소행렬을 리턴한다. # * 'onehot-dense'는 밀집 배열을 리턴한다. # * 'ordinal'은 순차적 범주값을 리턴한다. # * strategy : # * 'quantile': 각 구간에 포함된 데이터 갯수가 서로 비슷하도록 만든다. # * 'uniform': 구간의 폭이 동일하도록 만든다. # * 구간의 값은 bin_edges_ 속성으로 확인할 수 있다. from sklearn.preprocessing import KBinsDiscretizer # + kb = KBinsDiscretizer(4, encode='ordinal', strategy='quantile') kb.fit_transform(age) # + kb = KBinsDiscretizer(4, encode='onehot-dense', strategy='quantile') kb.fit_transform(age) # + kb = KBinsDiscretizer(4, encode='onehot-dense', strategy='uniform') kb.fit_transform(age) # - # 구간의 값 kb.bin_edges_
05DataPreprocess/03Discretize.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="tqnDVxRZgy6t"
# Importing libraries
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.cluster import KMeans
from scipy.stats import mode

# + colab={"base_uri": "https://localhost:8080/"} id="JzwLM-CKg_NE" outputId="db2acf90-be14-4097-aaaf-216123f4dbd6"
# Breast-cancer dataset: 569 samples x 30 numeric features, binary target
# (malignant / benign).
dataset = datasets.load_breast_cancer()
dataset

# + colab={"base_uri": "https://localhost:8080/"} id="1__RciQ5iV1f" outputId="d920fb48-37bb-4d36-8847-fb7885ccf567"
print(dataset.data.shape)
print(dataset.target.shape)

# + colab={"base_uri": "https://localhost:8080/"} id="aXt2-1GUibfc" outputId="cceb8cc5-54fa-4824-9cd7-ba0e88b91a52"
# Deliberately over-cluster into 10 groups; a fixed random_state keeps the
# cluster assignment reproducible across runs.
kmeans = KMeans(n_clusters=10, random_state=92)
prediction = kmeans.fit_predict(dataset.data)
prediction

# + colab={"base_uri": "https://localhost:8080/"} id="7bP4650BiiQ7" outputId="2ab92043-f5c6-432a-e15b-8e5239b62e3b"
kmeans.cluster_centers_.shape

# + id="TgxTTgv-in-j"
# Map every cluster to the majority true label among its members.
# np.ravel(...)[0] handles both old SciPy (mode returns a length-1 array)
# and SciPy >= 1.11 (mode returns a scalar), unlike raw mode(...)[0] chains.
labels = np.zeros_like(prediction)
for i in range(10):
    mask = (prediction == i)
    labels[mask] = np.ravel(mode(dataset.target[mask]).mode)[0]

# + id="STc0DZopisL9" colab={"base_uri": "https://localhost:8080/"} outputId="22e6895c-a68c-4ce0-849b-7d24fcab64a1"
from sklearn.metrics import accuracy_score
accuracy_score(dataset.target, labels)

# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="2apKfHZrit12" outputId="c8c73e2e-a275-46ea-c88e-80e5944acc03"
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

# Rows of mat.T are predicted labels, columns are true labels.
mat = confusion_matrix(dataset.target, labels)
ax = sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
                 xticklabels=dataset.target_names,
                 yticklabels=dataset.target_names, cmap='Greens')
#ax.set_ylim(10,10)
plt.xlabel('true label')
plt.ylabel('predicted label');
LAB 8/Lab8_KMeans.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import re  # used by won_clean; the original cell never imported it


def won_clean(self, excel):
    """Preprocess "won" (KRW) rows loaded from an Excel sheet.

    Loads the workbook via ``self.load_excel`` and keeps only the rows
    whose first column *begins with* a ``YYYY/MM/DD`` date, pairing that
    cell's full text with the value of the second column.

    :param excel: Excel file identifier understood by ``self.load_excel``.
    :returns: list of ``(first_column_text, second_column_value)`` tuples,
        in the original row order.
    """
    won_ex = self.load_excel(excel)
    # Raw string avoids the invalid-escape warning of the original
    # "[\\d]{4}/..." pattern; the matching behaviour is identical.
    date_re = re.compile(r"\d{4}/\d{2}/\d{2}")
    cleaned = []
    for row in won_ex.values:
        # The original matched the same cell twice and then read
        # Match.string (the *whole* cell text, not just the date part);
        # appending row[0] preserves that behaviour with one match.
        if date_re.match(row[0]) is not None:
            cleaned.append((row[0], row[1]))
    return cleaned
won_clean.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# ### Generate synthetic data points
# Data points are sampled from three normal distributions.

import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt
# %matplotlib inline

# Three class means with a shared isotropic covariance.
mu1, mu2, mu3 = [15, 20], [24, 25], [38, 40]
cov = [[10, 0], [0, 10]]
n_samples = 5000

data1 = rand.multivariate_normal(mu1, cov, n_samples)
data2 = rand.multivariate_normal(mu2, cov, n_samples)
data3 = rand.multivariate_normal(mu3, cov, n_samples)
data = np.vstack((data1, data2, data3))

plt.axis('equal')
plt.plot(data1[:, 0], data1[:, 1], '^b', label='Class_1')
plt.plot(data2[:, 0], data2[:, 1], 'sr', label='Class_2')
plt.plot(data3[:, 0], data3[:, 1], 'ok', label='Class_3')
plt.title('Original samples')
plt.legend(loc='best')

# ### Compute mean values

mu1 = np.mean(data1, 0)
mu2 = np.mean(data2, 0)
mu3 = np.mean(data3, 0)
mu = np.mean(data, 0)

# ### Compute the between-class scatter matrix $S_b$
# $S_b = \sum_{i}^{C} n_i (\mu^{i} - \mu)(\mu^{i} - \mu)^T$

s1 = np.outer(mu1 - mu, mu1 - mu) * data1.shape[0]
s2 = np.outer(mu2 - mu, mu2 - mu) * data2.shape[0]
s3 = np.outer(mu3 - mu, mu3 - mu) * data3.shape[0]
S_b = s1 + s2 + s3

# ### Compute the within-class scatter matrix $S_w$
#
# $S_w = \sum_{i=1}^{C} \sum_{j=1}^{n_i} (x^{i}_j - \mu_i)(x^{i}_j - \mu_i)^T$

def compute_within_scatter_matrix(data, mu):
    """
    Compute the within-class scatter matrix for a given class
    :param data: a numpy matrix of (n_samples, n_sample_dimensions)
    :param mu: a list of n_sample_dimensions
    """
    # Sum of outer products of the centered samples equals spread^T spread;
    # one BLAS matrix product replaces the original per-sample Python loop.
    spread = data - mu
    return spread.T.dot(spread)

s1 = compute_within_scatter_matrix(data1, mu1)
s2 = compute_within_scatter_matrix(data2, mu2)
s3 = compute_within_scatter_matrix(data3, mu3)
S_w = s1 + s2 + s3

#
### Solve the generalized eigenvalue problem for the matrix $S^{−1}_{w}S_{b}$ # eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_w).dot(S_b)) for eig_idx, eig_val in enumerate(eig_vals): print('Eigvector #{}: {} (Eigvalue:{:.3f})'.format(eig_idx, eig_vecs[:, eig_idx], eig_val)) # ### Double-check the computed eigen-vectors and eigen-values S = np.linalg.inv(S_w).dot(S_b) for eig_idx, eig_val in enumerate(eig_vals): eig_vec = eig_vecs[:, eig_idx] np.testing.assert_array_almost_equal(S.dot(eig_vec), eig_val*eig_vec, decimal=6, err_msg='', verbose=True) # ### Sort the eigenvectors by decreasing eigenvalues eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))] eig_pairs = sorted( eig_pairs, key=lambda x:x[0], reverse=True) eigv_sum = sum(eig_vals) for eig_val, eig_vec in eig_pairs: print('Eigvector: {} (Eigvalue:\t{:.3f},\t{:.2%} variance explained)'.format(eig_vec, eig_val, (eig_val/eigv_sum))) # If we take a look at the eigenvalues, we can already see that the second eigenvalue are much smaller than the first one. # Since Rank(AB) $\leq$ Rank(A), and Rank(AB) $\leq$ Rank(B), we have Rank($S_w^{-1}S_b$) $\leq$ Rank($S_b$). Due to that $S_b$ is the sum of $C$ matrices with rank 1 or less, Rank($S_b$) can be $C$-1 at most, where $C$ is the number of classes. This means that FDA can find at most $C$-1 meaningful features. The remining features discovered by FDA are arbitrary. # ### Choose m eigenvectors with the largest eigenvalues # After sorting the eigenpairs by decreasing eigenvalues, we can then construct our $d \times m$-dimensional transformation matrix $W$. # Here we choose the top most informative eigven-pair, as its eigenvalue explains 99.41% of the variance. As a result, the original d-dimensional (d=2) data points will be projected to a m-dimensional features space (m=1). 
# W = eig_pairs[0][1] print('Matrix W:\n', W.real) # ### Transforming the samples onto the new space # As the last step, we use the 1 $\times$ 2 dimensional matrix $W$ to transform our samples onto the embedding space via the equation $Y = W^TX$. FDA learns a linear transformation matrix $W \in R^{d \times m} (m \ll d)$, which maps each $d$-dimensional (d=2) # $x_i$ to a $m$-dimensional (m=1) $y_i$: $y_i = W^Tx_j $. X1_fda = W.dot(data1.T) X2_fda = W.dot(data2.T) X3_fda = W.dot(data3.T) # Now the transformed samples are scalar values. They are essentially the projection of the original data samples on the selected eigen vector, which corresponds to a straight line. To better visualize the projection, we visualize the transformed samples on the straight line under the original 2-dimensional space. slope = W[1]/W[0] Y1_fda = slope * X1_fda Y2_fda = slope * X2_fda Y3_fda = slope * X3_fda plt.axis('equal') plt.plot(X1_fda, Y1_fda, '^b', label='Class_1') plt.plot(X2_fda, Y2_fda, 'sr', label='Class_2') plt.plot(X3_fda, Y3_fda, 'ok', label='Class_3') plt.title('Projected samples') plt.legend(loc='best') # From the plot we can see that the projected samples retain most of the 'intrinsic information' from the original data samples. Dots with the same color stay together, while those with different colours stay away.
notebooks/2017-10-13/Demonstration_of_Linear_Discriminant_Anysis_on_Sythetic_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Sensitivity Plot Data Clicker
import numpy as np
import matplotlib.pyplot as plt
import os
plt.style.use('seaborn-dark-palette')
import pandas as pd
import seaborn
from astropy.io import ascii

# +
# Get the data directory and load in the data.
cwd = os.getcwd()
data_dir = cwd.replace('Figure_8', 'Data\\')

# Ten CSVs, one per simulation set; a loop replaces the original ten
# near-identical ascii.read(...) assignments.
detec = [ascii.read(data_dir + 'detections_fig_8_set_{}.csv'.format(k))
         for k in range(10)]

# 11 radius rows x (10 sets * 10 period columns) detection counts.
detec_array = np.zeros((11, 100))
for k in range(len(detec)):
    for i in range(11):
        for j in range(10):
            name = 'col' + str(j + 1)
            detec_array[i, k * 10 + j] = detec[k][name][i]

# +
# Make the array suitable for a heatmap: resample the log-spaced period
# grid onto a dense linear grid by nearest-neighbour lookup.
period_space = np.logspace(np.log10(3), np.log10(40), 100)
lin_period_space = np.linspace(3, 40, 10000)
new_detec = np.zeros((11, len(lin_period_space)))
for i in range(len(lin_period_space)):
    idx = np.argmin(np.abs(period_space - lin_period_space[i]))
    new_detec[:, i] = detec_array[:, idx]

# +
# Make the heatmap plot.
import matplotlib

font = {'weight': 'normal', 'size': 42}
plt.rc('font', **font)

# Tick labels: one per linear-grid period, formatted to one decimal.
periods = ['{:.1f}'.format(p) for p in lin_period_space]
radii = [0.5, 0.65, 0.8, 0.95, 1.1, 1.25, 1.4, 1.55, 1.7, 1.85, 2]

alt_detec = pd.DataFrame(new_detec, index=radii, columns=periods)
ax_detec = seaborn.heatmap(alt_detec,
                           cbar_kws={'label': 'Number of Retrievals',
                                     'orientation': 'horizontal'},
                           xticklabels=540, yticklabels=1)
plt.gca().invert_yaxis()
plt.ylabel('Planetary Radius ($R_{Jup}$)', fontsize=40)
plt.xlabel('Orbital Period (Days)', labelpad=20, fontsize=40, y=10)
fig = plt.gcf()
fig.set_size_inches(18, 18)
plt.tight_layout()
#plt.savefig('j1407_real_results_w_mr_NR.png')
plt.show()
Figure_8/Figure_8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ernestomancebo/DeepLearningInPractice/blob/main/text_and_sequences/gru_conv1d_weather_forecast.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="k0RUkTwqY6nT" # #Weather Forecast with GRU on top of a Conv1D # # Knowing the potential of feature extraction from the a convolutional layer, we're reducing the representation of the sequences and pushing it into a GRU layer and see how well it performs. This approach also makes it cheaper in terms of learning speed. # # ##Getting the Dataset # + colab={"base_uri": "https://localhost:8080/"} id="2KAbtC0sadD5" outputId="8f447672-4950-4bf7-ec6a-b95217f9fde0" # First we set up the data folder # !mkdir jena_climate # !wget https://s3.amazonaws.com/keras-datasets/jena_climate_2009_2016.csv.zip # !unzip jena_climate_2009_2016.csv.zip -d jena_climate # !rm jena_climate_2009_2016.csv.zip # + [markdown] id="9UU1qL7eczF8" # Now we process the input file which contains all the data. # + id="IHXTi0SIbrhj" colab={"base_uri": "https://localhost:8080/"} outputId="48b3251a-a5b6-4519-b2f2-b1b1c2cd1746" import os data_dir = 'jena_climate' fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv') f = open(fname) data = f.read() f.close() lines = data.split('\n') header = lines[0].split(',') lines = lines[1:] print(f'Header: {header}') print(f'No. 
Lines: {len(lines)}') # + [markdown] id="dFRuisX-dSpQ" # Moving from primitive array to NumPy array # + id="lpLDIlU0dZ2l" import numpy as np # Dimenssion: Elements, Features float_data = np.zeros((len(lines), len(header) -1)) for i, line in enumerate(lines): values = [float(x) for x in line.split(',')[1:]] float_data[i, :] = values # + [markdown] id="6No335Y7d3K9" # ###Ploting the temperature in timeseries. # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="yJLu_6zTeGuF" outputId="47b53043-365d-40a4-841d-93b4243567fe" from matplotlib import pyplot as plt temp = float_data[:, 1] plt.figure(figsize=(18, 6)) plt.plot(range(len(temp)), temp) # + [markdown] id="yeMuaeZ3fIME" # The dataset is composed by a 10-minutes frame metrics, meaning that per day there's `144` measures. To figure the first ten days we have to plot the first `1440` entries. # + colab={"base_uri": "https://localhost:8080/", "height": 391} id="3hWWYhrCe8M2" outputId="42a920ba-247e-4619-c2e5-1a5d09bb4541" plt.figure(figsize=(18, 6)) plt.plot(range(1440), temp[:1440]) # + [markdown] id="rq-X2Yeufjbi" # ##Preparing the Data # # We're shaping the problem as follow: # # Given a data going back as `lookback` timesteps (knowing that a timestep is 10 mint), and sampling every `steps` timesteps, we're predicting the temperature in `delay` timestep. # # That said, some sample values are: # # - `lookback = 720` - Training back 5 days. # - `steps = 6` - Looking a 1 point per hour: 60/10 = 6. # - `delay = 144` - The target is 24 hrs: 6 * 24 = 144. # + [markdown] id="zAwOmv9IhmJ5" # We also need to shape the `float_data` into something that the neural network can digest, and also we need to scale down everything. # # We're using the first 200,000 entries for training, so we're taking its mean and standard deviation. 
# + id="kUZk0J4uhmTr" mean = float_data[:200000].mean(axis=0) float_data -= mean std = float_data[:200000].std(axis=0) float_data /= std # + [markdown] id="c_ZkALGDhmc5" # Now we're setting a `generator` that yields a tuple of input data shaped as `(samples, targets)`. # + id="XDKGNfrrjQav" def generator(data,lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6): if max_index is None: max_index = len(data) - delay - 1 i = min_index + lookback while 1: if shuffle: rows = np.random.randint(min_index + lookback, max_index, size=batch_size) else: if i + batch_size >= max_index: i = min_index + lookback rows = np.arange(i, min(i + batch_size, max_index)) i += len(rows) samples = np.zeros((len(rows), lookback // step, data.shape[-1])) targets = np.zeros((len(rows),)) for j, row in enumerate(rows): # Defines the data slide indices idx = range(rows[j] - lookback, rows[j], step) samples[j] = data[idx] targets[j] = data[rows[j] + delay][1] yield samples, targets # + [markdown] id="Xr6FPVhAjQjG" # Now we define different generators for the splits we need # + id="6276MdmqjQr6" lookback = 720 # The step is 3, not 6 as seen in other examples step = 3 delay = 144 batch_size = 128 train_gen = generator( float_data, lookback=lookback, delay=delay, min_index=0, max_index=200_000, shuffle=True, step=step, batch_size=batch_size ) val_gen = generator( float_data, lookback=lookback, delay=delay, min_index=200_001, max_index=300_000, step=step, batch_size=batch_size ) test_gen = generator( float_data, lookback=lookback, delay=delay, min_index=300_001, max_index=None, step=step, batch_size=batch_size ) val_steps = (300_000 - 200_001 - lookback) // batch_size test_steps = (len(float_data) - 300_001 - lookback) // batch_size # + id="juatmrMj4Np0" colab={"base_uri": "https://localhost:8080/"} outputId="c589b132-e6f9-48ea-85bd-8758d33f82b3" from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() 
model.add(layers.Conv1D(32, 5, activation='relu', input_shape=(None, float_data.shape[-1]))) model.add(layers.MaxPooling1D(3)) model.add(layers.Conv1D(32, 5, activation='relu')) model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5)) model.add(layers.Dense(1)) model.summary() model.compile(optimizer=RMSprop(), loss='mae') # + id="rTadlZli4Nw4" colab={"base_uri": "https://localhost:8080/"} outputId="858cd657-d5f7-46cb-c03e-2e90b5cf4386" history = model.fit( train_gen, steps_per_epoch=500, epochs=20, validation_data=val_gen, validation_steps=val_steps ) # + id="MFZw_XfF4N5U" import matplotlib.pyplot as plt def plot_history(history): loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # + id="ryZMtdxF0NQI" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="427960fc-5b36-4b4a-dea0-a2ea25e7d95d" plot_history(history) # + [markdown] id="8Bov5tAU78Cz" # Perhaps this seems to overfit, the result isn't bad at all and really fast training and not so far from a soley RNN implementation.
text_and_sequences/gru_conv1d_weather_forecast.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python for R users # # Part 6: Linear algebra and statistical modeling # # # Here are some great resources (from which some of the material below was adapted): # - https://github.com/kuleshov/cs228-material/blob/master/tutorials/python/cs228-python-tutorial.ipynb # - https://cs231n.github.io/python-numpy-tutorial/#numpy # # First we need to tell Jupyter to let us use R within this Python notebook, and load some relevant libraries # + import numpy import pandas import statsmodels.api # %load_ext rpy2.ipython # - # ## Creating matrices in Numpy # # First let's refresh a bit on numpy matrices and look at the various functions that are available to work with them. Note that as we discussed in an earlier section, Numpy has two types of objects that can represent a numeric matrix: arrays and matrices. Because Numpy matrices are not used very often, we will focus here on arrays, so when I used the term "matrix" I am really referring to "a 2-dimensional numpy.array object". # # Let's create a matrix ```X```: # # $X = \begin{bmatrix} # 1 & 5\\ # 2 & 6\\ # 3 & 7\\ # 4 & 8 # \end{bmatrix}$ X = numpy.array([[1, 5], [2, 6], [3, 7], [4, 8]]) X X.shape # Let's say that we want to create a matrix with a single column called ```Y```: # # $Y = \begin{bmatrix} # 8\\ # 10\\ # 12\\ # 14 # \end{bmatrix}$ # # # If we just create a vector, it will only have a single dimension: Y_vector = numpy.array([8, 10, 12, 14]) Y_vector.shape # You might think that transposing it would turn it into a column vector, but that doesn't work: Y_vector.T Y_vector.T.shape # There are a couple of ways around this. 
First, we could specify it as a set of vectors: Y = numpy.array([[8], [10], [12], [14]]) Y # We could also create a vector and then turn it into a matrix by adding a dimension: # + Y = numpy.array([ 8, 10, 12, 14]) # create a vector print(Y) Y = Y[:, numpy.newaxis] # add a new axis using numpy.newaxis print(Y) # - # There are also a number of ways to create full matrices in Numpy: numpy.zeros((4, 4)) # create a matrix full of zeros numpy.ones((4, 4)) # create a matrix full of ones numpy.eye(4) # create a identity matrix numpy.random.randn(4, 4) # create a matrix of random normal variates # Finally, we can take a vector and reshape it into a matrix using the ```.reshape()``` operator numpy.random.randn(16).reshape((4, 4)) # create a 16 item vector and reshape into a 4 x 4 array # ## Basic arithmetic on matrices # # All of the standard aritmetic and logical operators work on matrices in Numpy, operating element-wise. # # A = numpy.ones((3,3)) * 2 print(A) B = numpy.arange(1,10).reshape(3,3) print(B) A + B # elementwise addition A - B # elementwise subtraction A * B # elementwise multiplication A / B # elementwise division A == B # elementwise equality # ## Matrix multiplication # # Matrix multiplication is performed in Numpy using the ```.dot()``` operator. # # First let's look at a simple example: the inner product of two matrices (aka the "dot product"). A = numpy.array([[1, 2, 3, 4]]) print(A.shape) A B = numpy.array([[5, 6, 7, 8]]).T print(B.shape) B dp = A.dot(B) dp # Note that the ```.dot()``` operator returns a single value (i.e. a *scalar*) but it returns it in the form of a matrix. 
Thus, if we wanted to work with that value we would need to reference it in the matrix: dp[0, 0] # The ```.dot()``` operator also performs matrix multiplication I = numpy.eye(B.shape[0]) I I.dot(B) # multiply by identity to get original matrix # ## Matrix inversion # # Matrix inversion for square matrices (when possible) can be performed using the ```numpy.linalg.inv()``` function. I = numpy.eye(4) numpy.linalg.inv(I) # inverse of an identity is itself # Often in statistics we need to compute the pseudo-inverse of a matrix that is not square. We can do that using the ```numpy.linalg.pinv()``` function R = numpy.random.randn(6, 4) R pinv = numpy.linalg.pinv(R) pinv pinv.dot(R) # should give back an identity matrix, probably with small numeric differences in off-diagnoals # ## Statistical modeling # # Now let's look at how to use linear algebra in Python to perform statistical modeling. We will fit the general linear model: # # $ Y = X * \beta$ # # where Y are the data, X is the *design matrix*, and $\beta$ is a vector of model parameters. We can estimate the parameters using the following equation: # # $ \hat{\beta} = (X^T X)^{-1}X^T Y$ # # We would generally want to include a column of ones in the design matrix (i.e. an intercept term) in addition to the independent variables of interest, in order to fit the mean of the data. Let's start by fitting a simple linear regression with five observations, using the following design matrix: # # $X = \begin{bmatrix} # -2 & 1\\ # -1 & 1\\ # 0 & 1\\ # 1 & 1\\ # 2 & 1 # \end{bmatrix}$ # # The first columns reflects the linear term, and the second column reflects the intercept. Note that we have *centered* the linear term by removing its mean. 
# X = numpy.ones((5, 2)) X[:, 0] = numpy.arange(-2, 3) X # Let's generate some data using this model: beta = numpy.array([3, 2]) # slope = 3, mean = 2 Y = X.dot(beta) Y # Now we can estimate the parameters from the data: beta_hat = numpy.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y) beta_hat # We could also use a built-in function from the statsmodels library to perfom linear regression on these data, which also provides statistics for the regression model. Statsmodels uses a design pattern that is common in Python, where the model is first configured and then fit in a separate step. model = statsmodels.api.OLS(Y, X) # set up the model results = model.fit() # fit the model print(results.summary())
notebooks/Part6_LinearAlgebraStatisticalModeling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- NAME = "<NAME>" DATE = "05052020" COLLABORATORS = "" # <img style="float: left;" src="earth-lab-logo-rgb.png" width="150" height="150" /> # # # Earth Analytics Education # # Climate Change and Wildfires # ## Part II Weather of San Diego and Atmospheric Composition in 2014 # ## Workflow # 1. Import packages and modules # 2. Import datetime conversion tools beteween panda and matplotlib for time series analysis # 3. Download air quality data from the EPA website # 4. Set working directory to "earth-analytics" # 5. Define paths to download data files from data folder 'sd_fires_2014' # 6. Import data into dataframes using appropriate functions(date-parser, indexing, remove missing values) # * weather data Jan-Dec. 2014 # * Atmospheric gases and particulate matter data Jan - Dec. 2014 # 7. view nature and type of data # 8. Resample data for daily values during 2014 fire season (Mar-June 2014) # 9. Plot data to view any anomalies in data . # 10. discuss plots and conclusions. # # ## Resources # # To complete this part of the assignment following chapter/websites were reviewed. # * Time series data <a href="https://www.earthdatascience.org/courses/use-data-open-source-python/use-time-series-data-in-python/date-time-types-in-pandas-python//" target="_blank">Time Series Data in Python</a> online textbook, which covers writing Date, time functions in Python. # * General python commands <a href="https://www.earthdatascience.org/courses/intro-to-earth-data-science//" target="_blank">Intro to Earth Data Science online textbook</a> # # * Environmental Protection Agency, USA. <a href="https://https://www.epa.gov/outdoor-air-quality-data//" target="_blank">EPA website/ User Guide to download data</a>. 
# * Earthpy functions <a href="https://earthpy.readthedocs.io/en/latest/earthpy-data-subsets.html#colorado-flood//" target="_blank">EarthPy User Guide</a>. # * Adherence to PEP 8 standards <a href="https://www.earthdatascience.org/courses/use-data-open-source-python/use-time-series-data-in-python/date-time-types-in-pandas-python//" target="_blank">online textbook page on PEP 8 </a>. # + active="" # # Core imports needed for grading # import matplotcheck.notebook as nb # import matplotcheck.timeseries as ts # - # ## Import Python Packages # # In the cells below packages/modules were imported needed to: # * create plots # * set working directory # * download data using earthpy functions # <a href="https://earthpy.readthedocs.io/en/latest/earthpy-data-subsets.html#colorado-flood//" target="_blank">EarthPy </a> # * work with pandaframes # + # Import packages/ modules import os import matplotlib.pyplot as plt import numpy as np import pandas as pd import earthpy as et import matplotlib.dates as mdates from matplotlib.dates import DateFormatter import seaborn as sns import datetime from textwrap import wrap # Handle date time conversions between pandas and matplotlib from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() # Use white grid plot background from seaborn sns.set(font_scale=1.5, style="whitegrid") # - # ## Set Working Directory # 1. Use conditional statement to set working directory # 2. Set base path to download data # 3. Define path to download data # 4. Import data and save as dataframes. # + # Conditional statement to check and set working directory. 
# Conditional statement to check and set working directory.
ea_path = os.path.join(et.io.HOME, 'earth-analytics')
if os.path.exists(ea_path):
    os.chdir(ea_path)
    print("working directory is set to earth-analytics")
else:
    print("This path does not exist")

# Base path under which the San Diego 2014 air-quality data lives.
base_path = os.path.join(ea_path, "data", "SD_2014_aq_data")
base_path

# Relative paths to the EPA daily-summary CSV files used below.
# The positional names file_path1..file_path13 are kept because later
# cells refer to them by number.
_aq_file_names = [
    "daily_PM10_total_2014.csv",
    "daily_PM25_2014.csv",
    "daily_O3_2014.csv",
    "daily_NO2_2014.csv",
    "daily_CO_2014.csv",
    "daily_RH_DP_2014.csv",
    "daily_TEMP_2014.csv",
    "daily_PRESS_2014.csv",
    "daily_VOCS_2014.csv",
    "daily_SO2_2014.csv",
    "daily_PM2_AQI_2014.csv",
    "daily_LEAD_2014.csv",
    "daily_WIND_2014.csv",
]
(file_path1, file_path2, file_path3, file_path4, file_path5,
 file_path6, file_path7, file_path8, file_path9, file_path10,
 file_path11, file_path12, file_path13) = [
    os.path.join("data", "SD_2014_aq_data", name) for name in _aq_file_names
]

# To check if path is created
os.path.exists(file_path2)
# Import csv files into dataframes and ensure date/time is parsed properly.
def _read_aq(path):
    """Read one EPA daily-summary CSV with 'Date Local' parsed and indexed."""
    return pd.read_csv(path, parse_dates=['Date Local'],
                       index_col=['Date Local'])


ca_PM10_total_2014_df = _read_aq(file_path1)
ca_PM25_2014_df = _read_aq(file_path2)
ca_O3_2014_df = _read_aq(file_path3)
ca_NO2_2014_df = _read_aq(file_path4)
ca_CO_2014_df = _read_aq(file_path5)
ca_RH_2014_df = _read_aq(file_path6)
ca_temp_2014_df = _read_aq(file_path7)
ca_pres_2014_df = _read_aq(file_path8)
ca_VOC_2014_df = _read_aq(file_path9)
ca_SO2_2014_df = _read_aq(file_path10)
ca_pm2_AQI_2014_df = _read_aq(file_path11)
ca_lead_2014_df = _read_aq(file_path12)
ca_wind_2014_df = _read_aq(file_path13)

# View/test data to ensure correct files are loaded.
ca_PM25_2014_df.head(2)

# # Data exploration and analysis
# The EPA provides data for the entire state and it is a large data set,
# often slowing the processing time.  Therefore, it is important to select
# only the data required to check air quality and weather conditions.
# Ozone, oxides of nitrogen and carbon monoxide are produced during
# wildfires; black carbon and particulate matter emitted during wildfires
# is dangerous to inhale.  These datasets allow a preliminary analysis of
# the effects of wildfires on the air quality in San Diego County.
# Select San Diego County data from the statewide frames and archive it.
_SD_DIR = 'data/output_figures/sandiego_2014_fires/SD_weather_aq2014'
_STAT_DIR = 'data/output_figures/sandiego_2014_fires/air_quality_csv'


def _select_sd(df, out_csv):
    """Filter *df* to San Diego County rows and archive them as a CSV."""
    sd = df[df["County Name"] == 'San Diego']
    sd.to_csv(_SD_DIR + '/' + out_csv)
    return sd


sd_PM10 = _select_sd(ca_PM10_total_2014_df, 'SD_PM10.csv')
sd_PM25 = _select_sd(ca_PM25_2014_df, 'SD_PM25.csv')
sd_O3 = _select_sd(ca_O3_2014_df, 'SD_O3.csv')
sd_NO2 = _select_sd(ca_NO2_2014_df, 'SD_NO2.csv')
sd_CO = _select_sd(ca_CO_2014_df, 'SD_CO.csv')
sd_vocs = _select_sd(ca_VOC_2014_df, 'SD_VOC.csv')
sd_lead = _select_sd(ca_lead_2014_df, 'SD_lead.csv')
sd_SO2 = _select_sd(ca_SO2_2014_df, 'SD_SO2.csv')
sd_aqi = _select_sd(ca_pm2_AQI_2014_df, 'SD_AQI.csv')
sd_RH = _select_sd(ca_RH_2014_df, 'SD_RH.csv')
sd_temp = _select_sd(ca_temp_2014_df, 'SD_temp.csv')
sd_pres = _select_sd(ca_pres_2014_df, 'SD_pres.csv')
sd_wind = _select_sd(ca_wind_2014_df, 'SD_wind.csv')

sd_wind.columns


# Daily mean and standard error (SEM) per measurement, archived as CSVs.
def _mean_sem(df, col, out_csv):
    """Aggregate *col* of *df* to daily mean and SEM, archive and return."""
    stats = df.groupby('Date Local').agg({col: ['mean', 'sem']})
    stats.to_csv(_STAT_DIR + '/' + out_csv)
    return stats


# NOTE: column names differ between EPA products -- most use
# 'Arithmetic Mean', but a few files carry renamed columns.
sd_mean_sem_O3 = _mean_sem(sd_O3, 'O3_ppb', 'sd_mean_O3.csv')
sd_mean_sem_NO2 = _mean_sem(sd_NO2, 'Arithmetic Mean', 'sd_mean_NO2.csv')
sd_mean_sem_CO = _mean_sem(sd_CO, 'CO ppb', 'sd_mean_CO.csv')
# VOC has only 77 measurements for the entire year.
sd_mean_sem_VOC = _mean_sem(sd_vocs, 'Arithmetic Mean', 'sd_mean_VOC.csv')
# Lead has only 55 values for the entire year.
sd_mean_sem_lead = _mean_sem(sd_lead, 'Arithmetic Mean', 'sd_mean_lead.csv')
# PM2.5 has 365 counts for the entire year.
sd_mean_sem_PM25 = _mean_sem(sd_PM25, 'Arithmetic Mean', 'sd_mean_PM25.csv')
# PM10 has 110 counts for the entire year.
sd_mean_sem_PM10 = _mean_sem(sd_PM10, 'Arithmetic Mean', 'sd_mean_PM10.csv')
# SO2 has 223 counts for the entire year.
sd_mean_sem_SO2 = _mean_sem(sd_SO2, 'Arithmetic Mean', 'sd_mean_SO2.csv')
# AQI has 365 counts for the entire year.
sd_mean_sem_aqi = _mean_sem(sd_aqi, 'Arithmetic Mean', 'sd_mean_aqi.csv')
# RH has 365 counts for the entire year.
sd_mean_sem_RH = _mean_sem(sd_RH, 'RH (%)', 'sd_mean_RH.csv')
# Temperature has 365 values for the year.
sd_mean_sem_temp = _mean_sem(sd_temp, 'T_celsius', 'sd_mean_temp.csv')
# Pressure has 365 values for the year.
sd_mean_sem_pres = _mean_sem(sd_pres, 'Arithmetic Mean', 'sd_mean_pres.csv')
# Wind has 365 counts for the entire year.
sd_mean_sem_wind = _mean_sem(sd_wind, 'wind (mph)', 'sd_mean_wind.csv')

# View/test data to ensure correct data is selected.
sd_temp.columns
# sd_mean_sem_wind.describe()

# Test column names are correctly imported in each file
sd_O3.columns
# sd_NO2.columns

# Test all ARB monitoring stations in the San Diego County are included.
sd_O3['City Name'].unique()

# Take Daily mean of all the stations to facilitate analysis.
# Simple daily means across all stations (the agg() mean/SEM frames above
# supersede these, but later plotting cells still use these variables).
def _daily_mean(df, col):
    """Return the daily mean of *col* across stations as a one-column frame."""
    return pd.DataFrame(df.groupby(['Date Local']).mean()[col])


sd_mean_PM10 = _daily_mean(sd_PM10, "Arithmetic Mean")
sd_mean_CO = _daily_mean(sd_CO, "Arithmetic Mean")
sd_mean_O3 = _daily_mean(sd_O3, "Arithmetic Mean")
sd_mean_NO2 = _daily_mean(sd_NO2, "Arithmetic Mean")
sd_mean_RH = _daily_mean(sd_RH, "RH (%)")
sd_mean_temp = _daily_mean(sd_temp, "Arithmetic Mean")
sd_mean_vocs = _daily_mean(sd_vocs, "Arithmetic Mean")
sd_mean_lead = _daily_mean(sd_lead, "Arithmetic Mean")
sd_mean_PM25 = _daily_mean(sd_PM25, "Arithmetic Mean")
sd_mean_aqi = _daily_mean(sd_aqi, "Arithmetic Mean")
sd_mean_pres = _daily_mean(sd_pres, "Arithmetic Mean")
sd_mean_wind = _daily_mean(sd_wind, "wind (mph)")

# # Data Visualization
# A picture is worth a thousand words: a graphic that captures the salient
# features of the data conveys a clear message to the audience.  Anomalies
# in the data motivate a deeper look into the chemical processes and
# physics of the atmosphere.

# NOTE(review): the author flagged that the wind-speed scale in the graphs
# looks wrong -- it should top out around 1-7 mph.  Verify the source data.
# Time-series panel figures for San Diego weather and air quality.
# Each figure is described by a list of panel specs; _draw_panels renders
# them so the five near-identical plotting cells share one code path.
def _draw_panels(axes, panels):
    """Plot each (frame, column, color, marker, axis_kwargs) spec on its axis."""
    for ax, (frame, column, color, marker, axis_kwargs) in zip(axes, panels):
        ax.plot(frame.index.values, frame[column], color=color, marker=marker)
        ax.set(**axis_kwargs)


date_form1 = DateFormatter("%b-%d")
# Fire-season window shared by most panels.
_XLIM = ["2014-03-01", "2014-07-01"]

# Figure 1 (a-e): meteorology -- ozone, temperature, RH, wind, pressure.
fig, axes = plt.subplots(5, sharex=True, figsize=(10, 12))
fig.subplots_adjust(bottom=0.15, left=0.2)
fig.suptitle(""" Figure 1(a-e). Atmospheric conditions during wildfires\n Mar - June 2014, San Diego, CA.""", fontsize=20)
_draw_panels(axes, [
    (sd_mean_O3, 'Arithmetic Mean', "blue", "*", dict(ylabel="Ozone \n ppb)")),
    (sd_mean_temp, 'Arithmetic Mean', "green", "*", dict(ylabel="Temp. \n (oF)")),
    (sd_mean_RH, 'RH (%)', "red", "*", dict(ylabel="RH \n(%)")),
    (sd_mean_wind, 'wind (mph)', "black", "*", dict(ylabel="Wind Speed \n (mph)")),
    (sd_mean_pres, 'Arithmetic Mean', "purple", "*",
     dict(xlabel="Dates", ylabel=" Pressure \n (mb)",
          xlim=["2014-03-01", "2014-06-30"])),
])
plt.setp(axes[4].get_xticklabels(), rotation=55)
axes[4].xaxis.set_major_formatter(date_form1)
plt.savefig('data/output_figures/sandiego_2014_fires/RH_T_P_wind_profile.png')

# (A commented-out draft of a 3-panel variant of this figure was removed.)

# Figure 2 (a-e): chemistry -- PM10, PM2.5 AQI, O3, NO2, CO, lead.
fig, axes = plt.subplots(6, 1, sharex=True, figsize=(10, 12))
fig.subplots_adjust(bottom=0.15, left=0.2)
fig.suptitle(""" Figure 2(a-e). Chemical composition of the Atmosphere during wildfires\n Mar-June 2014, San Diego, CA.""", fontsize=20)
_chem_panels = [
    (sd_mean_PM10, 'Arithmetic Mean', "black", "o",
     dict(ylabel="PM10 \n (ug/m3)", xlim=_XLIM)),
    (sd_mean_aqi, 'Arithmetic Mean', "red", "*",
     dict(ylabel="PM2.5 \n (ug/m3)", xlim=_XLIM)),
    (sd_mean_O3, 'Arithmetic Mean', "blue", "*",
     dict(ylabel="Ozone \n (ppm)", xlim=_XLIM)),
    (sd_mean_NO2, 'Arithmetic Mean', "green", "*",
     dict(ylabel=" Nitric oxide\n (ppm)", xlim=_XLIM)),
    (sd_mean_CO, 'Arithmetic Mean', "purple", "*",
     dict(ylabel="CO \n (ppm)", xlim=_XLIM)),
]
_lead_panel = (sd_mean_lead, 'Arithmetic Mean', "orange", "o",
               dict(xlabel="Dates", ylabel="Lead \n (ug/m3)", xlim=_XLIM))
_draw_panels(axes, _chem_panels + [_lead_panel])
plt.setp(axes[5].get_xticklabels(), rotation=45)
for ax in axes:
    ax.xaxis.set_major_formatter(date_form1)
plt.savefig('data/output_figures/sandiego_2014_fires/chemical_comp_SD_atm1.png')

# Figure 4 (a-c): NO2, CO and lead only.
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(10, 12))
fig.suptitle(""" Figure 4(a-c). Chemical composition of the Atmosphere \n Mar-June 2014, San Diego, CA.""", fontsize=20)
_draw_panels(axes, [
    (sd_mean_NO2, 'Arithmetic Mean', "green", "*",
     dict(ylabel=" Nitric oxide\n (ppm)", xlim=_XLIM)),
    (sd_mean_CO, 'Arithmetic Mean', "purple", "*",
     dict(ylabel="CO \n (ppm)", xlim=_XLIM)),
    _lead_panel,
])
plt.setp(axes[2].get_xticklabels(), rotation=45)
axes[2].xaxis.set_major_formatter(date_form1)
plt.savefig('data/output_figures/sandiego_2014_fires/NOx_CO_Pb_sandiego.png')
fig.tight_layout()

# Figure 2 variant (v01): same chemistry panels without the lead panel.
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(10, 12))
fig.subplots_adjust(bottom=0.15, left=0.2)
fig.suptitle(""" Figure 2(a-e). Chemical composition of the Atmosphere during wildfires\n Mar-June 2014, San Diego, CA.""", fontsize=20)
_draw_panels(axes, _chem_panels)
plt.setp(axes[4].get_xticklabels(), rotation=45)
for ax in axes:
    ax.xaxis.set_major_formatter(date_form1)
plt.savefig('data/output_figures/sandiego_2014_fires/chemical_comp_SD_atm_v01.png')

# Figure 6 (a-d): ozone, temperature, RH, wind.
# FIX: the original indexed sd_mean_RH and sd_mean_wind with
# 'Arithmetic Mean', a column that does not exist in those frames (they
# were built from 'RH (%)' and 'wind (mph)'), raising KeyError.  Use the
# real column names, consistent with Figure 1.
fig, axes = plt.subplots(4, sharex=True, figsize=(10, 12))
fig.subplots_adjust(bottom=0.15, left=0.2)
fig.suptitle(""" Figure 6(a-d). Atmospheric conditions during wildfires\n Mar - June 2014, San Diego, CA.""", fontsize=20)
_draw_panels(axes, [
    (sd_mean_O3, 'Arithmetic Mean', "blue", "*", dict(ylabel="Ozone \n ppb)")),
    (sd_mean_temp, 'Arithmetic Mean', "green", "*", dict(ylabel="Temp. \n (oF)")),
    (sd_mean_RH, 'RH (%)', "red", "*", dict(ylabel="RH \n(%)")),
    (sd_mean_wind, 'wind (mph)', "black", "*",
     dict(xlabel="Dates", ylabel="Wind Speed \n (mph)",
          xlim=["2014-03-01", "2014-06-30"])),
])
plt.setp(axes[3].get_xticklabels(), rotation=45)
axes[3].xaxis.set_major_formatter(date_form1)
plt.savefig('data/output_figures/sandiego_2014_fires/Temp_RH_p_profile.png')

# # Results and Conclusions:
# 1. Figures 1a-e clearly indicate that high temperature, low relative
#    humidity and low pressure in Southern California were prevalent from
#    April to June.
#    These conditions are called "Santa Ana" events, which bring hot, dry
#    air from the Great Plains of the USA toward the coastal areas.
# 2. The extreme dry weather is the main culprit igniting wildfires in
#    San Diego County.
# 3. Figures 2a-e indicate that, as a result of wildfires, particulate
#    matter (PM10 and PM2.5) concentration increased, peaking on
#    May 15, 2014.
# 4. During wildfires, pollutant concentrations (oxides of nitrogen,
#    carbon monoxide and ozone) rose to dangerous levels -- double or
#    triple the EPA maximum permissible limits.
# 5. We breathe 3000 L of air every day; such high pollutant levels often
#    trigger respiratory and cardiovascular diseases.  Children, the
#    elderly, and other sensitive populations often experience respiratory
#    distress/asthma during such pollution events.
# 6. There is a mystery peak of O3, NOx and lead in early April that
#    requires further attention.

# Figure: 2013 precipitation pattern from three subsets of the original
# Boulder dataset.
# NOTE(review): daily_precip_aug_oct_2013, monthly_max_precip_2013 and
# mo_total_precip_1948_2013 are never defined in this notebook (they come
# from the Boulder flood assignment), so this cell raises NameError as
# written -- confirm whether this section belongs here.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 10))
offset = 0.3

ax1.plot(daily_precip_aug_oct_2013.index.values,
         daily_precip_aug_oct_2013['HPCP'], color='green', marker='*')
ax2.plot(monthly_max_precip_2013.index.values,
         monthly_max_precip_2013['HPCP'], color='red', marker='o')
ax3.plot(mo_total_precip_1948_2013.index.values,
         mo_total_precip_1948_2013['HPCP'], color='blue')

ax1.set(xlabel="Date", ylabel="Precipitation (inches)",
        title=" (1a). Daily Total Precipitation (Aug-Oct 2013) in Boulder, CO")
ax2.set(xlabel="Date", ylabel="Precipitation (inches)",
        title=" (1b). Monthly maximum of daily precipitation 2013, Boulder, CO")
ax3.set(xlabel="Date", ylabel="Precipitation (inches)",
        title=" (1c). Monthly Total Precipitation 1948-2013, Boulder, CO ")

for ax in (ax1, ax2, ax3):
    plt.setp(ax.get_xticklabels(), rotation=45)

date_form1 = DateFormatter("%b-%d")
ax1.xaxis.set_major_formatter(date_form1)
date_form2 = DateFormatter("%b-%d")
ax2.xaxis.set_major_formatter(date_form2)
date_form3 = DateFormatter("%Y")
ax3.xaxis.set_major_formatter(date_form3)
plt.tight_layout()
plt.show()
# # Ensure a major tick for each week using (interval=1)
# ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1)

# # Stream Discharge (cubic feet per second - CFS)
# Data selection for figure 2: (1) daily discharge Aug 1 - Oct 1 2013,
# (2) monthly maxima for 2013, (3) monthly totals for all years; x-axis
# labels month-day for (1)/(2) and year for (3).

# Import discharge data: parse dates, index on date, drop missing values.
# NOTE(review): file_path2 above points at daily_PM25_2014.csv, which has
# no 'datetime' column -- this read will fail.  The variable appears to be
# reused from the flood assignment; confirm the intended path.
boulder_daily_discharge_1986_2013 = pd.read_csv(
    file_path2,
    parse_dates=['datetime'],
    index_col=['datetime'],
    na_values=['999.99'])
boulder_daily_discharge_1986_2013.head()

# Inspect data types and summary statistics.
boulder_daily_discharge_1986_2013.info()
boulder_daily_discharge_1986_2013.describe()

# Subset of daily discharge values from Aug 1, 2013 to Oct 1, 2013.
daily_dis_aug_oct_2013 = (
    boulder_daily_discharge_1986_2013['2013-08-01':'2013-10-01']
    .resample('D').sum())
daily_dis_aug_oct_2013

# Subset of monthly maximum discharge values in 2013.
monthly_max_dis_2013 = (
    boulder_daily_discharge_1986_2013['2013-01-01':'2013-12-31']
    .resample('M').max())
monthly_max_dis_2013

# Monthly discharge totals for all stations over the entire dataset.
mo_total_dis_1986_2013 = boulder_daily_discharge_1986_2013.resample('M').sum()
mo_total_dis_1986_2013

# ## A comparison of 2013 and historical discharge (CFS) through streams
# ## in Boulder, CO -- each subplot states coverage, duration, measured
# ## quantity and units.

# Create figure and subplot space for discharge in Boulder.
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 10))

ax1.plot(daily_dis_aug_oct_2013.index.values,
         daily_dis_aug_oct_2013['disValue'], color='green', marker='*')
ax2.plot(monthly_max_dis_2013.index.values,
         monthly_max_dis_2013['disValue'], color='red', marker='o')
ax3.plot(mo_total_dis_1986_2013.index.values,
         mo_total_dis_1986_2013['disValue'], color='blue')

ax1.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(2a). Mean daily Total Discharge (CFS) Aug-oct 2013, Boulder, CO.")
ax2.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(2b). Monthly maximum of daily mean discharge (CFS) 2013 Boulder, CO.")
ax3.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(2c). Monthly Total of daily mean discharge (CFS) 1986-2013 Boulder, CO.")

for ax in (ax1, ax2, ax3):
    plt.setp(ax.get_xticklabels(), rotation=45)

date_form1 = DateFormatter("%b-%d")
ax1.xaxis.set_major_formatter(date_form1)
ax2.xaxis.set_major_formatter(date_form1)
date_form3 = DateFormatter("%Y")
ax3.xaxis.set_major_formatter(date_form3)

# To adjust proper spacing
plt.tight_layout()
plt.show()

# # Results and Discussion on plots
# 1. Fig 1a shows exceptionally high rainfall on two consecutive days
#    (Sep 12-13).
# 2. Fig 1b-c also indicate higher-than-usual precipitation in Sep 2013.
# 3. Fig 2a-b indicate higher discharge in Sep 2013.
# 4. Fig 2c suggests 1995 and 1997 saw similar discharge rates without
#    flooding.
# 5. Conclusion: high rainfall on two consecutive days in Sep 2013 caused
#    the overflow leading to the historic Boulder, CO flood.
# Figures 3a-d support this: monthly totals (~26000 CFS) in Jul 1995/1997
# were similar to Sep 2013, but the monthly *maximum* discharge tripled in
# Sep 2013 (~5000 CFS vs ~1500 CFS in 1995/1997).

# Subsets comparing 2013 with 1995-1997.
# 1. Monthly total discharge in 2013 and 1995-1997.
monthly_tot_dis_2013 = (
    boulder_daily_discharge_1986_2013['2013-01-01':'2013-12-31']
    .resample('M').sum())
monthly_tot_dis_1995_97 = (
    boulder_daily_discharge_1986_2013['1995-01-01':'1997-12-31']
    .resample('M').sum())
# 2. Monthly maximum discharge in 2013 and 1995-1997.
monthly_max_dis_2013 = (
    boulder_daily_discharge_1986_2013['2013-01-01':'2013-12-31']
    .resample('M').max())
monthly_max_dis_1995_97 = (
    boulder_daily_discharge_1986_2013['1995-01-01':'1997-12-31']
    .resample('M').max())

# Create figure and subplot space for the 2013 vs 1995-1997 comparison.
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(10, 10))

ax1.plot(monthly_tot_dis_1995_97.index.values,
         monthly_tot_dis_1995_97['disValue'], color='green', marker="o")
ax2.plot(monthly_tot_dis_2013.index.values,
         monthly_tot_dis_2013['disValue'], color='black', marker="o")
ax3.plot(monthly_max_dis_1995_97.index.values,
         monthly_max_dis_1995_97['disValue'], color='blue', marker="o")
ax4.plot(monthly_max_dis_2013.index.values,
         monthly_max_dis_2013['disValue'], color='red', marker="o")

ax1.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(3a). Monthly total Discharge (CFS) 1995-1997, Boulder, CO")
ax2.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(3b). Monthly total Discharge (CFS) 2013, Boulder, CO")
ax3.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(3c). Monthly maximum Discharge (CFS) 1995-1997, Boulder, CO")
ax4.set(xlabel="Date", ylabel="Discharge (CFS)",
        title="(3d). Monthly maximum Discharge (CFS) 2013, Boulder, CO")

for ax in (ax1, ax2, ax3, ax4):
    plt.setp(ax.get_xticklabels(), rotation=45)

# Multi-year panels get month-year labels; 2013 panels get month-day.
date_form1 = DateFormatter("%b-%y")
date_form2 = DateFormatter("%b-%d")
ax1.xaxis.set_major_formatter(date_form1)
ax2.xaxis.set_major_formatter(date_form2)
ax3.xaxis.set_major_formatter(date_form1)
ax4.xaxis.set_major_formatter(date_form2)
plt.tight_layout()
plt.show()
test_notebooks/test_notebooks_aq/SD_O3_PM25_NOx_2014_scikit_stat_tests.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import requests
import json
from qsforex import settings
from datetime import datetime, timedelta
import csv

'''
This is to learn how to query OANDA for historical price patterns.
See here for more details:
<http://developer.oanda.com/rest-live/rates/#retrieveInstrumentHistory>

The following example illustrates the request format with curl:
"https://api-fxtrade.oanda.com/v1/candles?instrument=EUR_USD&count=2&candleFormat=midpoint&granularity=D&dailyAlignment=0&alignmentTimezone=America%2FNew_York"

Here's a breakdown:
domain:       api-fxtrade.oanda.com
endpoint:     /v1/candles
instrument:   (req.) pairs requested
count:        # of candles (opt., default 500)
candleFormat: (opt.) bidask (default) returns both bid and ask candles,
              midpoint returns midpoint between bid and ask
granularity:  (opt., default=S5) time range represented by ea. candlestick
dailyAlignment: (opt.) The hour of day used to align candles with hourly,
              daily, weekly, or monthly granularity, defaults to 1700, NY time.
alignmentTimezone: (opt., default is America/NewYork) The timezone to be
              used for the dailyAlignment parameter.  This parameter does
              NOT affect the returned timestamp: START or END PARAMETERS
              WILL ALWAYS BE IN UTC.  Full list available here:
              http://developer.oanda.com/docs/timezones.txt
start (opt.): start time stamp
end (opt.):   end time stamp
weeklyAlignment: (opt., default "Friday") The day of the week used to align
              candles with weekly granularity.  The value specified will be
              used as the start/end day when calculating the weekly candles.
              Valid values are: "Monday", "Tuesday", "Wednesday", "Thursday",
              "Friday", "Saturday", "Sunday".
'''

# Some preliminary setup: credentials and endpoint come from the qsforex
# settings module; 'practice' selects the demo (fxpractice) environment.
targetDomain = 'practice'
domain = settings.ENVIRONMENTS['api'][targetDomain]
access_token = settings.ACCESS_TOKEN
account_id = settings.ACCOUNT_ID

INST = 'EUR_USD'          # instrument to request
endpoint = '/v1/candles'  # REST endpoint for historical candles
DAYS = 5                  # how far back the request window starts
GRAN = 'M15'              # candle granularity


# +
def GetGranularitySecs(granularity):
    '''
    Parses an OANDA-style granularity, specified like S10 or M15, into the
    corresponding number of seconds.

    'M' followed by digits means minutes; a bare 'M' means one month and is
    approximated as 30 days.  'D' and 'W' take no numeric suffix.

    Raises ValueError for an unrecognised granularity string instead of
    silently falling through and returning None.
    '''
    unit = granularity[0]
    if unit == 'S':
        return int(granularity[1:])
    elif unit == 'M':
        # minute when a count follows, otherwise month (30-day approximation)
        if len(granularity) > 1:
            return 60 * int(granularity[1:])
        else:
            return 60 * 60 * 24 * 30
    elif unit == 'H':
        return 60 * 60 * int(granularity[1:])
    elif unit == 'D':
        return 60 * 60 * 24
    elif unit == 'W':
        return 60 * 60 * 24 * 7
    raise ValueError('unknown granularity: %r' % granularity)
# -


def PrepReq(pair, granularity='S5', candle_format="bidask", count=None,
            daily_alignment=None, alignment_timezone=None,
            weekly_alignment="Monday", start=None, end=None):
    '''
    Build, send and decode a GET /v1/candles request for *pair*.

    Parameters mirror the OANDA v1 candles API query parameters (see the
    module docstring above).  Returns the decoded JSON body on success;
    on an HTTP error, prints the response text and returns None.
    '''
    params = {
        "accountId": account_id,
        "instrument": pair,
        "candleFormat": candle_format,
        "granularity": granularity,
        "count": count,
        "dailyAlignment": daily_alignment,
        "alignmentTimezone": alignment_timezone,
        "weeklyAlignment": weekly_alignment,
        "start": start,
        "end": end,
    }
    client = requests.Session()
    requests.packages.urllib3.disable_warnings()
    url = "https://" + domain + "/v1/candles"
    headers = {'Authorization': 'Bearer ' + access_token}
    req = requests.Request('GET', url, headers=headers, params=params)
    pre = req.prepare()
    resp = client.send(pre, stream=False, verify=True)
    if resp.ok:
        rjson = resp.json()
        return rjson
    else:
        print(resp.text)


# Request a one-hour window of candles starting DAYS days ago.
start = datetime.now() - timedelta(days=DAYS)
end = start + timedelta(hours=1)
start = start.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
end = end.strftime("%Y-%m-%dT%H:%M:%S.%f%z")

msg = PrepReq(INST, start=start, end=end)

import pandas as pd

# Index the returned candles by their timestamp for time-series work.
df = pd.DataFrame(msg['candles'])
df.index = pd.to_datetime(df['time'])
df.head()
Learning to use OANDA historical prices(candles).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     language: python
#     name: python3
# ---

# # 4.2 String Conversion (Every Class Needs a __repr__ )
#
# When you define a custom class in Python and then try to print one of
# its instances to the console (or inspect it in an interpreter session), you
# get a relatively unsatisfying result. The default "to string" conversion
# behavior is basic and lacks detail:

class Car:
    """Toy class demonstrating Python's string-conversion dunder methods."""

    def __init__(self, color, mileage):
        self.color = color
        self.mileage = mileage

    # toString()
    def __str__(self):
        # Human-friendly text used by print() and str().
        return f'a {self.color} car'

    def __repr__(self):
        # Unambiguous, developer-facing text used by the interpreter,
        # debuggers and containers; by convention it reads like a
        # constructor call that could recreate the object.  As the section
        # title says: every class needs a __repr__.
        return f'{self.__class__.__name__}({self.color!r}, {self.mileage!r})'


my_car = Car('red', 37281)
print(my_car)

# That’s better than nothing , but it’s also not very useful.

print(my_car.color, my_car.mileage)

my_car2 = Car('red', 37281)
print(my_car2)
2022/Python-Tricks-Book/Chapter4-Classes-OOP/string-conversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt sol = np.load('./sol.npy') fig, ax = plt.subplots(figsize=(3, 3)) ax.plot(sol[:, 0], sol[:, 3]) ax.set_xlabel(r'$\theta_1$') ax.set_ylabel(r'$\dot{\theta}_1$') plt.title('Trajectory under learned controller') plt.show()
pySwing/simulation/test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Taivasta kartoittamassa - mitä näemme yllämme? # Tähdet ovat aina kiehtoneet ihmismieltä, mutta mitä me varsinaisesti näemme paitsi kauniiden, kirkkaiden pisteiden peiton? # + # AJA TÄMÄ SOLU ENSIN! Tässä otetaan käyttöön tarvittavat python-paketit. import pandas as pd import numpy as np import matplotlib.pyplot as plt # - # Otetaan tarkasteltavaksi joukko tietoja yllämme näkyvistä kohteista, joiden laajemmat versiot löytyvät selityksineen osoitteesta http://www.astronexus.com/hyg HYG-tietokannasta. Pääpiirteissään kyseessä on sellaisten tähtien joukko, jotka ovat joko tarpeeksi kirkkaita näkyäkseen Maasta käsin tai lähellä Aurinkoa (noh, tähtitieteellisessä mitassa lähellä, 75 valovuoden etäisyydellä). # # Mistä HYG on lyhenne? Käypä selvittämässä! # + # Ajamalla tämän solun saat käyttöösi suuren katalogin tähtien tietoja. # Kiitokset <NAME>elle helposti luettavan setin luomisesta. taivas = pd.read_csv("https://raw.githubusercontent.com/adamlamee/CODINGinK12/master/data/stars.csv") # - # ### Miltä data näyttää? # + # Tällä komennolla näet muuttujan ylimmät rivit. Mitä käy jos vaihdat suluissa olevaa numeroa tai poistat sen? taivas.head(10) # + # Tällä komennolla voit tarkistaa, montako riviä dataa on. len(taivas) # - # Taulukosta nähdään joukko kryptisiä sarakkeita. Tarkemmat selitykset löytyvät [Astronexuksen sivulta](http://www.astronexus.com/hyg), mutta oleellisimmat lienevät tähtien paikkatiedot (rektaskensio "ra", deklinaatio "dec", etäisyys "dist") ja valaisuun liittyvät ominaisuudet (havaittu magnitudi "mag" ja absoluuttinen kirkkaus "absmag", lämpötila "temp" sekä luminositeetti "lum"). Ryhdytään nyt tarkastelemaan mitä näistä voidaan nähdä. 
# # (Tutustu rektaskensioon ja deklinaatioon [täällä](https://fi.wikipedia.org/wiki/Rektaskensio).) # ### Kerro, kerro, kuvaaja # + # Tässä otetaan "taivas"-tietorakenteesta kaksi saraketta ja luodaan niiden arvopareista kuvaaja. plt.figure(figsize=(15, 4)) plt.scatter(taivas.ra, taivas.dec, s=0.01) # Mitä tietoja tässä käytettiin? plt.xlim(24, 0) plt.title("Kaikkien katalogissa olevien tähtien paikat Maasta katsottuna \n") plt.xlabel('Tarvitsen nimen') # Korjaapa tämä otsikko sopivaksi! plt.ylabel('Akseloin, siis olen') # Korjaapa tämä otsikko sopivaksi! plt.show() # Näetkö muotoja kuvassa? Mistä moiset voisivat johtua? # - # Tähtiä luokitellaan usein lämpötilansa ja kirkkautensa mukaan, siten että kirkkaammilla tähdillä on pienempi absoluuttinen magnitudi. Auringolla se on noin 4,8 (vai oliko? Mitä sanoo tarkastelemamme data?). # + # Piirretään taas x,y-parien mukainen kuvaaja, jonka pisteitä # käsitellään hieman niin, että niistä erottaakin jotakin. plt.figure(figsize = (10,10)) plt.scatter(taivas.temp, taivas.absmag, s = 1, edgecolors = 'none', color = "red") plt.xlim(2000,15000) # Mitä käy jos säädät näitä numeroita? plt.ylim(20,-15) plt.title("Keksipäs tähän kiva otsikko \n") # Muokkaa tämä sopivammaksi. plt.ylabel("Absoluuttinen magnitudi") plt.xlabel("Pintalämpötila (K)") plt.show() # - # Painottuvatko tulokset jonnekin? Erottuvatko jotkin alueet muusta massasta? Löydätkö kuvasta kirkkaan, muttei erityisen lämpimän alueen (jättiläisiä)? Erityisen kuuman ja kirkkaan, mutta pienistä tähdistä koostuvan valkoisten kääpiöiden alueen? # # ### Tähdistöt # Harvempi tähti on yksinäinen, vaan useimmiten ne kuuluvat suurempiin tähtijoukkoihin joille on annettu erilaisia nimiä pitkin vuosituhansia ja kulttuuripiirejä. Kansainväliseen käyttöön vakiintuneimmat nimet ovat eurooppalaista perua ja löytyvät "taivas"-tietorakenteen sarakkeesta "con" (constellation, tähdistö). # + # Tämä poimii sarakkeesta listan lyhenteitä, jotka vastaavat tähdistöjen nimiä. 
taivas.sort_values('con').con.unique() # Pystytkö sanallistamaan, mitä tällä rivillä käskettiin? # - # Tarkastellaan seuraavaksi, miten voisimme luokitella kunkin tähdistön sisältämiä tähtiä toisiinsa ja poimia sieltä joitain kiinnostavia yksilöitä. # + # Tämä näyttää yhden tähdistön, vaihda kysyttyä lyhennettä alla ja tutki muitakin! # Komento .query('...') antaa etsiä tietyntyyppistä dataa koko aineistosta. constellation = taivas.query('con == "Vir"') # Käytetään "subplot"-ominaisuutta piirtämään useita asioita yhtä aikaa. plt.figure(figsize=(12, 4)) plt.subplot(1, 2, 1) # Tällä saat näkyviin kyseisen tähdistön 10 kirkkainta. Mitäs jos .headin arvoa muuttaisi? constellation = constellation.sort_values('mag').head(10) plt.scatter(constellation.ra, constellation.dec) plt.gca().invert_xaxis() plt.title("Tähdistön kuva taivaalla \n") plt.xlabel('Rektaskensio') plt.ylabel('Deklinaatio') plt.subplot(1, 2, 2) # Okei, mutta mites nämä suhtautuvat koko käsiteltyyn aineistoon? Nyt lyödäänkin yhden tähdistön kirkkaimmat # pisteet koko kartalle näkyviin. plt.scatter(taivas.temp, taivas.absmag, s = 1, edgecolors = 'none', alpha = 0.2) plt.scatter(constellation.temp, constellation.absmag, color = 'red', edgecolors = 'none') plt.xlim(2000,15000) plt.ylim(20,-15) plt.title("Verrattuna koko taivaaseen \n") plt.ylabel("Absoluuttinen magnitudi") plt.xlabel("Lämpötila (K)") plt.show() # - # Miten tuloksesi asettuvat tähtien elinkaarta kuvaavalle Hertzsprung-Russell -kuvaajalle? (huomaa akselit!) # # <img src = "https://8green2017.files.wordpress.com/2013/04/hr_diagram.jpg"> # ### Aurinko, lähin tähtemme # Aurinko on monin tavoin tärkeä meille ihmisille, niin planeettamme elämän mahdollistajana kuin parhaana tutkimuskohteenakin selvittäessämme, miten tähdet toimivat. Verrattuna kaikkeen muuhun koko aurinkokunnassa se on valtava, mutta kaikkien tähtien joukossa melkoinen höyhensarjalainen. Tarkastellaan vielä, miten se vertautuu näihin lähisukulaisiinsa. 
# +
# The Sun, "Sol", can be picked out from the others with a search command.
aurinko = taivas.query('proper == "Sol"')
print(aurinko)

# +
# If we now wanted, for example, to compare its temperature to the others...
# .iloc[0] extracts the scalar from the one-row query result; calling
# float() directly on a Series is deprecated in modern pandas and will
# raise in future versions.  The conversion is hoisted out so it is done
# once rather than in each comparison.
auringon_lampo = float(aurinko.temp.iloc[0])

isommat = taivas[taivas.temp > auringon_lampo]
pienet = taivas[taivas.temp < auringon_lampo]

print("Aurinkoa kuumempia tähtiä on listassa " + repr(len(isommat)))
print("Aurinkoa kylmempiä tähtiä on listassa " + repr(len(pienet)))
# -

# How could you compare the stars' brightnesses (both apparent and
# absolute) in the manner above? Try it!
Demot/Muut_aiheet/Taivasta_kartoittamassa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from math import pi from spatialmath import * import ipywidgets as widgets import matplotlib.pyplot as plt # %matplotlib notebook # %matplotlib notebook # - # # Working in 3D # ## Rotation # Rotations in 3D can be represented by rotation matrices – 3x3 orthonormal matrices – which belong to the group $\mbox{SO}(3)$. These are a subset of all possible 3x3 real matrices. # # We can create such a matrix, a rotation of $\pi/4$ radians around the x-axis by R1 = SO3.Rx(pi/4) # which is an object of type type(R1) # which contains an $\mbox{SO}(3)$ matrix. We can display that matrix R1 # which is colored red if the console supports color. # # The matrix, a numpy array, is encapsulated and not directly settable by the user. This way we can ensure that the matrix is proper member of the $\mbox{SO}(3)$ group. # # We can _compose_ these rotations using the Python `*` operator R1 * R1 # which is a rotation by $\pi/4$ _then_ another rotation by $\pi/4$ which is a total rotation of $\pi/2$ about the X-axis. We can doublecheck that SO3.Rx(pi/2) # We could also have used the exponentiation operator R1**2 # We can also specify the angle in degrees SO3.Rx(45, 'deg') # We can visualize what this looks like by fig = plt.figure() # create a new figure SE3().plot(frame='0', dims=[-1.5,1.5], color='black') R1.plot(frame='1') # Click on the coordinate frame and use the mouse to change the viewpoint. The world reference frame is shown in black, and the rotated frame is shown in blue. # Often we need to describe more complex orientations and we typically use a _3 angle_ convention to do this. Euler's rotation theorem says that any orientation can be expressed in terms of three rotations about different axes. 
# # One common convention is roll-pitch-yaw angles R2 = SO3.RPY([10, 20, 30], unit='deg') R2 # which says that we rotate by 30&deg; about the Z-axis (yaw), _then_ 20&deg; about the Y-axis (pitch) and _then_ 10&deg; about the X-axis – this is the ZYX roll-pitch yaw convention. Note that: # # 1. the first rotation in the sequence involves the last element in the angle sequence. # 2. we can change angle convention, for example by passing `order='xyz'` # # We can visualize the resulting orientation. plt.figure() # create a new figure SE3().plot(frame='0', dims=[-1.5,1.5], color='black') R2.plot(frame='2') # We can convert any rotation matrix back to its 3-angle representation R2.rpy() # ### Constructors # # The default constructor yields a null rotation SO3() # which is represented by the identity matrix. # # The class supports a number of variant constructors using class methods: # # | Constructor | rotation | # |---------------|-----------| # | SO3() | null rotation | # | SO3.Rx(theta) | about X-axis | # | SO3.Ry(theta) | about Y-axis| # | SO3.Rz(theta) | about Z-axis| # | SO3.RPY(rpy) | from roll-pitch-yaw angle vector| # | SO3.Eul(euler) | from Euler angle vector | # | SO3.AngVec(theta, v) | from rotation and axis | # | SO3.Omega(v) | from a twist vector | # | SO3.OA | from orientation and approach vectors | # Imagine we want a rotation that describes a frame that has its y-axis (o-vector) pointing in the world negative z-axis direction and its z-axis (a-vector) pointing in the world x-axis direction SO3.OA(o=[0,0,-1], a=[1,0,0]) # We can redo our earlier example using `SO3.Rx()` with the explicit angle-axis notation SO3.AngVec(pi/4, [1,0,0]) # or SO3.Exp([pi/4,0,0]) # or a more complex example SO3.AngVec(30, [1,2,3], unit='deg') # ### Properties # # The object has a number of properties, such as the columns which are often written as ${\bf R} = [n, o, a]$ where $n$, $o$ and $a$ are 3-vectors. 
For example R1.n # or its inverse (in this case its transpose) R1.inv() # the shape of the underlying matrix R1.shape # and the order R1.N # indicating it operates in 3D space. # ### Predicates # # We can check various properties of the object using properties and methods that are common to all classes in this package [R1.isSE, R1.isSO, R1.isrot(), R1.ishom(), R1.isrot2(), R1.ishom2()] # The last four in this list provide compatibility with the Spatial Math Toolbox for MATLAB. # ## Quaternions # # A quaternion is often described as a type of complex number but it is more useful (and simpler) to think of it as an order pair comprising a scalar and a vector. We can create a quaternions q1 = Quaternion([1,2,3,4]) q1 # where the scalar is before the angle brackets which enclose the vector part. # # Properties allow us to extract the scalar part q1.s # and the vector part q1.v # and we can represent it as a numpy array q1.vec # A quaternion has a conjugate q1.conj() # and a norm, which is the magnitude of the equivalent 4-vector q1.norm() # We can create a second quaternion q2 = Quaternion([5,6,7,8]) q2 # Operators allow us to add q1 + q2 # subtract q1 - q2 # and to multiply q1 * q2 # which follows the special rules of Hamilton multiplication. # # Multiplication can also be performed as the linear algebraic product of one quaternion converted to a 4x4 matrix q1.matrix # and the other as a 4-vector q1.matrix @ q2.vec # The product of a quaternion and its conjugate is a scalar equal to the square of its norm q1 * q1.conj() # Conversely, a quaternion with a zero scalar part is called a _pure quaternion_ Quaternion.Pure([1, 2, 3]) # ### Unit quaternions # # A quaternion with a unit norm is called a _unit quaternion_ . It is a group and its elements represent rotation in 3D space. It is in all regards like an $\mbox{SO}(3)$ matrix except for a _double mapping_ -- a quaternion and its element-wise negation represent the same rotation. 
q1 = UnitQuaternion.Rx(30, 'deg') q1 # the convention is that unit quaternions are denoted using double angle brackets. The norm, as advertised is indeed one q1.norm() # We create another unit quaternion q2 = UnitQuaternion.Ry(-40, 'deg') q2 # The rotations can be composed by quaternion multiplication q3 = q1 * q2 q3 # We can convert a quaternion to a rotation matrix q3.R # which yields exactly the same answer as if we'd done it using SO(3) rotation matrices SO3.Rx(30, 'deg') * SO3.Ry(-40, 'deg') # The advantages of unit quaternions are that # # 1. they are compact, just 4 numbers instead of 9 # 2. multiplication involves fewer operations and is therefore faster # 3. numerical errors build up when we multiply rotation matrices together many times, and they lose the structure (the columns are no longer unit length or orthogonal). Correcting this, the process of _normalization_ is expensive. For unit quaternions errors will also compound, but normalization is simply a matter of dividing through by the norm # Unit quaternions have an inverse q2.inv() q1 * q2.inv() # or q1 / q2 # We can convert any unit quaternion to an SO3 object if we wish q1.SO3() # and conversely, any `SO3` object to a unit quaternion UnitQuaternion( SO3.Rx(30, 'deg')) # A unit quaternion is not a minimal representation. Since we know the magnitude is 1, then with any 3 elements we can compute the fourth upto a sign ambiguity. q1.vec3 a = UnitQuaternion.qvmul( q1.vec3, q2.vec3) a # from which we can recreate the unit quaternion UnitQuaternion.Vec3(a) # ## Representing position # # In robotics we also need to describe the position of objects and we can do this with a _homogeneous transformation_ matrix – a 4x4 matrix – which belong to the group $\mbox{SE}(3)$ which is a subset of all 4x4 real matrices. 
# # We can create such a matrix, for a translation of 1 in the x-direction, 2 in the y-direction and 3 in the z-direction by T1 = SE3(1, 2, 3) T1 # which is displayed in a color coded fashion: rotation matrix in red, translation vector in blue, and the constant bottom row in grey. We note that the red matrix is an _identity matrix_ . # The class supports a number of variant constructors using class methods. # # | Constructor | motion | # |---------------|-----------| # | SE3() | null motion | # | SE3.Tx(d) | translation along X-axis | # | SE3.Ty(d) | translation along Y-axis | # | SE3.Tz(d) | translation along Z-axis | # | SE3.Rx(theta) | rotation about X-axis | # | SE3.Ry(theta) | rotation about Y-axis| # | SE3.Rz(theta) | rotation about Z-axis| # | SE3.RPY(rpy) | rotation from roll-pitch-yaw angle vector| # | SE3.Eul(euler) | rotation from Euler angle vector | # | SE3.AngVec(theta, v) | rotation from rotation and axis | # | SO3.Omega(v) | from a twist vector | # | SE3.OA(ovec, avec) | rotation from orientation and approach vectors | # We can visualize this plt.figure() # create a new figure SE3().plot(frame='0', dims=[0,4], color='black') T1.plot(frame='1') # We can define another translation T12 = SE3(2, -1, -2) # and compose it with `T1` T2 = T1 * T12 T2.plot(frame='2', color='red') # ## Representing pose T1 = SE3(1, 2, 3) * SE3.Rx(30, 'deg') T1 # Is a composition of two motions: a pure translation and _then_ a pure rotation. We can see the rotation matrix, computed above, in the top-left corner and the translation components in the right-most column. In the earlier example `Out[24]` was simply a null-rotation which is represented by the identity matrix. 
# # The frame now looks like this plt.figure() # create a new figure SE3().plot(frame='0', dims=[0,4], color='black') T1.plot(frame='1') # ### Properties # # The object has a number of properties, such as the columns which are often written as $[n, o, a]$ T1.o # or its inverse (computed in an efficient manner based on the structure of the matrix) T1.inv() # We can extract the rotation matrix as a numpy array T1.R # or the translation vector, as a numpy array T1.t # The shape of the underlying SE(3) matrix is T1.shape # and the order T1.N # indicating it operates in 3D space. # ### Predicates # # We can check various properties [T1.isSE, T1.isSO, T1.isrot(), T1.ishom(), T1.isrot2(), T1.ishom2()] # ### A couple of important points: # # When we compose motions they must be of the same type. An `SE3` object can represent pure transation, pure rotation or both. If we wish to compose a translation with a rotation, the rotation must be an `SE3` object - a rotation plus zero translation. # # SUperset # ### Transforming points # Imagine now a set of points defining the vertices of a cube P = np.array([[-1, 1, 1, -1, -1, 1, 1, -1], [-1, -1, 1, 1, -1, -1, 1, 1], [-1, -1, -1, -1, 1, 1, 1, 1]]) P # defined with respect to a body reference frame ${}^A P_i$. 
Given a transformation ${}^0 \mathbf{T}_A$ from the world frame to the body frame, we determine the coordinates of the points in the world frame by ${}^0 P_i = {}^0 \mathbf{T}_A \, {}^A P_i$ which we can perform in a single operation Q = T1 * P # which we can now plot # + fig = plt.figure() SE3().plot(frame='0', dims=[-2,3,0,5,0,5], color='black') ax = plt.gca() ax.set_xlabel('X'); ax.set_ylabel('Y'); ax.set_zlabel('Z'); ax.scatter(xs=Q[0], ys=Q[1], zs=Q[2], s=20) # draw vertices # draw lines joining the vertices lines = [[0,1,5,6], [1,2,6,7], [2,3,7,4], [3,0,4,5]] for line in lines: ax.plot([Q[0,i] for i in line], [Q[1,i] for i in line], [Q[2,i] for i in line]) # - # This is often used in SLAM and bundle adjustment algorithms since it is compact and better behaved than using roll-pitch-yaw or Euler angles. # ## Twists # A twist is an alternative way to represent a 3D pose, but it is more succinct, comprising just 6 values. In constrast an SE(3) matrix has 16 values with a considerable amount of redundancy, but it does offer consider computational convenience. # # Twists are the logarithm of an SE(3) matrix T = SE3.Rand() T T.log() # How do we know this is really the logarithm? Well, we can exponentiate it lg = T.log() SE3.Exp(lg) # and we have reconstituted our original matrix. # # The logarithm is a matrix with a very particular structure, it has a zero diagonal and bottom row, and the top-left 3x3 matrix is skew symmetric. This matrix has only 6 unique elements: three from the last column, and three from the skew symmetric matrix, and we can request the `log` method to give us just these T.log(twist=True) # This 6-vector is a twist, a concise way to represent the translational and rotational components of a pose. Twists are represented by their own class tw = Twist3(T) tw # Just like the other pose objects, `Twist3` objects can have multiple values. 
# # Twists can be composed T = SE3(1, 2, 3) * SE3.Rx(0.3) tw = Twist3(T) tw # Now we can compose the twists tw2 = tw * tw tw2 # and the result is just the same as if we had composed the transforms Twist3(T * T) # Twists have great utility for robot arm kinematics, to compute the forward kinematics and Jacobians. Twist objects have a number of methods. # # The adjoint is a 6x6 matrix that relates velocities tw.Ad() # and the `SE3` object also has this method. # # The logarithm of the adjoint is given by tw.ad() # The name twist comes from considering the rigid-body motion as a rotation and a translation along a unique line of action. It rotates as it moves along the line following a screw like motion, hence its other name as a _screw_. The line in 3D space is described in Plücker coordinates by tw.line() # The pitch of the screw is tw.pitch() # and a point on the line is tw.pole() # # Working in 2D # Things are actually much simpler in 2D. There's only one possible rotation which is around an axis perpendicular to the plane (where the z-axis would have been if it were in 3D). # # Rotations in 2D can be represented by rotation matrices – 2x2 orthonormal matrices – which belong to the group SO(2). Just as for the 3D case these matrices have special properties, each column (and row) is a unit vector, and they are all orthogonal, the inverse of this matrix is equal to its transpose, and its determinant is +1. # # We can create such a matrix, a rotation of $\pi/4$ radians by R = SO2(pi/4) R # or in degrees SO2(45, unit='deg') # and we can plot this on the 2D plane plt.figure() # create a new figure R.plot() # Once again, it's useful to describe the position of things and we do this this with a homogeneous transformation matrix – a 3x3 matrix – which belong to the group SE(2). T = SE2(1, 2) T # which has a similar structure to the 3D case. The rotation matrix is in the top-left corner and the translation components are in the right-most column. 
# # We can also call the function with the element in a list T = SE2([1, 2]) plt.figure() # create a new figure T.plot() T2 = SE2(45, unit='deg') T2 plt.figure() # create a new figure T2.plot() # The inplace versions of operators are also supported, for example X = T X /= T2 X # # Operators # # ## Group operators # # For the 3D case, the classes we have introduced mimic the behavior the mathematical groups $\mbox{SO}(3)$ and $\mbox{SE}(3)$ which contain matrices of particular structure. They are subsets respectively of the sets of all possible real 3x3 and 4x4 matrices. # # The only operations on two elements of the group that also belongs to the group are composition (represented by the `*` operator) and inversion. T1 = SE3(1, 2, 3) * SE3.Rx(30, 'deg') [type(T1), type(T1.inv()), type(T1*T1)] # If we know the pose of frame {2} and a _rigid body motion_ from frame {1} to frame {2} T2 = SE3(4, 5, 6) * SE3.Ry(-40, 'deg') T12 = SE3(0, -2, -1) * SE3.Rz(70, 'deg') # then ${}^0{\bf T}_1 \bullet {}^1{\bf T}_2 = {}^0{\bf T}_2$ then ${}^0{\bf T}_1 = {}^1{\bf T}_2 \bullet ({}^0{\bf T}_2)^{-1}$ which we write as T1 * T2.inv() # or more concisely as T1 / T2 # Exponentiation is also a group operator since it is simply repeated composition T1 ** 2 # ## Non-group operations # # Operations such as addition and subtraction are valid for matrices but not for elements of the group, therefore these operations will return a numpy array rather than a group object SE3() + SE3() # yields an array, not an `SE3` object. As do other non-group operations 2 * SE3() SE3() - 1 # Similar principles apply to quaternions. Unit quaternions are a group and only support composition and inversion. Any other operations will return an ordinary quaternion UnitQuaternion() * 2 # which is indicated by the single angle brackets. # ## In-place operators # # All of Pythons in-place operators are available as well, whether for group or non-group operations. 
For example T = T1 T *= T2 T **= 2 # # Multi-valued objects # # For many tasks we might want to have a set or sequence of rotations or poses. The obvious solution would be to use a Python list T = [ SE3.Rx(0), SE3.Rx(0.1), SE3.Rx(0.2), SE3.Rx(0.3), SE3.Rx(0.4)] # but the pose objects in this package can hold multiple values, just like a native Python list can. There are a few ways to do this, most obviously T = SE3( [ SE3.Rx(0), SE3.Rx(0.1), SE3.Rx(0.2), SE3.Rx(0.3), SE3.Rx(0.4)] ) # which has type of a pose object type(T) # but it has length of five len(T) # that is, it contains five values. We can see these when we display the object's value T # We can index into the object (slice it) just as we would a Python list T[3] # or from the second element to the last in steps of two T[1:-1:2] # We could another value to the end T.append( SE3.Rx(0.5) ) len(T) # The `SE3` class, like all the classes in this package, inherits from the `UserList` class giving it all the methods of a Python list like append, extend, del etc. We can also use them as _iterables_ in _for_ loops and in list comprehensions. # # You can create an object of a particular type with no elements using this constructor T = SE3.Empty() len(T) # which is the equivalent of setting a variable to `[]`. # We could write the above example more succinctly T = SE3.Rx( np.linspace(0, 0.5, 5) ) len(T) T[3] # Consider another rotation T2 = SE3.Ry(40, 'deg') # If we write A = T * T2 len(A) # we obtain a new list where each element of `A` is `T[i] * T2`. Similarly B = T2 * T len(B) # which has produced a new list where each element of `B` is `T2 * T[i]`. # # Similarly C = T * T len(C) # yields a new list where each element of `C` is the `T[i] * T[i]`. # # We can apply such a sequence to a coordinate vectors as we did earlier P = T * [0, 1, 0] P # where each element of `T` has transformed the coordinate vector (0, 1, 0), the results being consecutive columns of the resulting numpy array. 
# # This is equivalent to writing np.column_stack([x * [0,1,0] for x in T]) # ## C++ like programming model # Lists are useful, but we might like to use a programming model where we allocate an array of pose objects and reference them or assign to them. We can do that too! # + T = SE3.Alloc(5) # create a vector of SE3 values for i, theta in enumerate(np.linspace(0, 1, len(T))): T[i] = SE3.Rz(theta) T # - # ``Alloc`` initializes every element to the identity value. This technique works for all pose objects.
spatialmath/introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/keerthn123/demo1/blob/main/Untitled0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="7s2mKgOHJS8N" # 1. Read a signal peptide sequence from the user into a variable, change the case and print it. Also print the length of the sequence. (Hint- use string manipulation functions) # + id="iCdEtluMpaqO" signal_peptide_sequence = input("Enter the Signal Peptide Sequence : ") signal_peptide_sequence = signal_peptide_sequence.upper() length = len(signal_peptide_sequence) print(signal_peptide_sequence) print("The length of signal peptide sequence : "+str(length)) # + [markdown] id="cO14NyMK1VNe" # 2. Given DNA sequance is = "GTGCATCTGACTCCTGAGGAGAAG" # Generate to mRNa sequence using python # + id="YMzoJZtZ2QDm" sequance = 'AATGCATATGCA' sequance dna2rna = {'A': 'U', 'T': 'A', 'C': 'G', 'G': 'C'} rna_seq = str() for nucleotide in sequance: rna_seq += dna2rna[nucleotide] print('DNA', sequance) print('mRNA', rna_seq) # + [markdown] id="ffGqJMaJ_Wwe" # OR # + id="DD1lwQQA-ja9" DNAseq=input('ENTER DNA SEQUENCE') DNAseq=DNAseq.upper() mRNAseq=DNAseq.replace('A','U') mRNAseq=mRNAseq.replace('T','A') mRNAseq=mRNAseq.replace("G","x") mRNAseq=mRNAseq.replace("C","G") mRNAseq=mRNAseq.replace("x","C") print("DNA sequence: ",DNAseq) print("mRNA sequence: ",mRNAseq) # + [markdown] id="XpSBmYGpDTDA" # 3 = Read a DNA sequence and print the number of different bases in the sequence. 
# (Hint-use count function)

# + id="UzogCMbSDZ2c"
dna_sequance = input('ENTER DNA SEQUENCE = ')
dna_sequance = dna_sequance.upper()
# Count each of the four bases with str.count and print the tallies.
a = dna_sequance.count("A")
b = dna_sequance.count("G")
c = dna_sequance.count("C")
d = dna_sequance.count("T")
print(a)
print(b)
print(c)
print(d)

# + [markdown] id="-cuGON-xJeh_"
# 4. Read a DNA sequence from the user and calculate the percentage of GC content in the sequence. (Hint-use count function)

# + colab={"base_uri": "https://localhost:8080/"} id="WDkUSv4HNzOq" outputId="0ea6b2da-985d-40c5-d6ac-d3a43d9f9fc2"
import random

# Generate a random 2048-base sequence and report per-base percentages.
dna = ''.join(random.choice('ATGCN') for i in range(2048))
print(dna)
print("A count", round((dna.count("A") / 2048) * 100), "%")
print("T count", round((dna.count("T") / 2048) * 100), "%")
print("G count", round((dna.count("G") / 2048) * 100), "%")
print("C count", round((dna.count("C") / 2048) * 100), "%")
print("AT count", round((dna.count("AT") / 2048) * 100), "%")
print("GC count", round((dna.count("GC") / 2048) * 100), "%")

# + id="50rDUvAOOG6s"
sequence = input("DNA sequence = ")


def count(seq):
    """Return the (A, T, C, G) fractions of seq, case-insensitively.

    Returns (0.0, 0.0, 0.0, 0.0) for an empty sequence instead of
    raising ZeroDivisionError.
    """
    a = t = c = g = 0
    # Normalise case so both 'acgt' and 'ACGT' input are counted;
    # the original compared against lowercase letters only, so
    # uppercase input produced an all-zero (0% GC) result.
    for n in seq.lower():
        if n == 'a':
            a += 1
        elif n == 't':
            t += 1
        elif n == 'c':
            c += 1
        elif n == 'g':
            g += 1
    length = len(seq)  # bug fix: was Len(seq), which raises NameError
    if length == 0:
        return 0.0, 0.0, 0.0, 0.0
    return a / length, t / length, c / length, g / length


per = count(sequence)
print("The GC content is = " + str((per[2] + per[3]) * 100) + "%")

# + id="yw7Nwc5LZZS5"
Untitled0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Can You Find Voyager 1? # ## A Breakthrough Listen Radio SETI Tutorial # Originally written by <NAME>, edited by <NAME> # [Voyager 1](https://en.wikipedia.org/wiki/Voyager_1) is the most distant human-made object from Earth. Launched by NASA in 1977, it has travelled at fantastic speed (roughly 17,000 m/s), past the outer boundaries of our Solar System and into interstellar space (>13.8 billion miles from the Sun). # # Remarkably, 43 years on, Voyager 1 is still sending telemetry data from the depths of interstellar space. This makes it a great systems test for the Breakthrough Listen signal processing pipeline. # # In this tutorial, we load, read, and plot some Breakthrough Listen (BL) observations of Voyager 1. The data were taken using the [Robert C. Byrd Greenbank Telescope](https://science.nrao.edu/facilities/gbt) in West Virginia. # ### About the data # We used the Greenbank X-band receiver (8.0-11.6 GHz) on December 30, 2015, to observe the known position of Voyager 1. The BL digital signal processing system saves digitized data in a 'raw' format, which we have converted into 'filterbank' format using our gpuspec code, and then into a more efficient '.h5' format. For more information about the `filterbank` format, see the [SIGPROC user guide](http://sigproc.sourceforge.net/sigproc.pdf) or the "Finding ETI" tutorial. # # For the purposes of this tutorial, we will be using the 50 MB Voyager1.single_coarse.fine_res.h5 file consisting of a subset of frequencies (corresponding to a single coarse channel) which should contain the Voyager signal. 
This datafile is too big to store on GitHub, so we will download it from its location on the [Intro2Astro Google Drive](https://drive.google.com/file/d/1mEcojp_bfiYxaB0JDOzW9KjqEqyJOjop/view?usp=sharing) - click that link now to do so. As large as it is, this is only 1/64th of the normal size of a single BL datafile! # *** # ## Step 0: Installing Necessary Software ( `blimpy` and `turboSETI`) # Before we do anything else, we need to download the BL-specific software that you'll be using for this project. **If you already have `blimpy` and `turboSETI` downloaded from another tutorial you can skip this step** # ### Downloading `blimpy` # The first package to download is called `blimpy`, which stands for "Breakthrough Listen I/O Methods for Python". The `blimpy` package helps us read, investigate, and plot `filterbank` (.fil) files, which you can think of as data cubes which store *power* values at each *frequency* we recorded over *time*. # # We are going to use the development version of blimpy hosted by the UC Berkeley SETI Center on [GitHub](https://github.com/UCBerkeleySETI/blimpy). Because we're using the dev version instead of the stable, `pip install`able version, we'll install this with git instead of pip. Go to a terminal window and use `cd` to navigate to your Week 7 folder\* (use `ls` to make sure you're in the right place). Now type `git clone https://github.com/UCBerkeleySETI/blimpy.git` (you can copy-paste the address from here, or the GitHub page using the `Code` button). You should see some messages about the code downloading, and, once it's finished, you should have a new directory called `blimpy` in your Week 7 folder. Use `cd` to navigate into it, and then type `pip install .` to install the code you just downloaded. This will take a minute or less and it should download (among itself, and other packages if necessary) something called `h5py`. 
`.h5` files are a more efficient way to store the same info as `.fil` files, and BL uses the external `h5py` software to do the writing/reading for this format. # # \*If you are not doing this tutorial through the Intro2Astro class, just navigate to whichever folder contains this Jupyter Notebook and the data file you downloaded. # ### Downloading `turboSETI` # The second package we'll need to download is called `turboSETI` and is the official BL narrowband signal search pipeline. To break that down, turboSETI looks for very narrow frequency bands (~a few Hz) that contain anomalously high powers throughout an observation. It is even capable of following these signals if they drift off of their original frequency due to relative accelerations between the receiver on Earth and the transmitter out in space (very likely, as radial accelerations are *everywhere* in the universe). Finding these narrow signals would be exciting, because there's no astrophysical phenomenon that we know of that can get to bandwidths smaller than ~kHz (and those are radio masers, which are a very interesting topic in their own right). Not only that, but these very narrow signals are how we communicate to each other via GPS, cell signals, etc. on Earth! # # We are going to use my (Sofia Sheikh's) [personal version](https://github.com/Sofysicist/turbo_seti) of the `turboSETI` code, instead of the official version, because there are some features that I've developed, especially with the plotting in the end, that haven't been integrated into the official repository yet. To do this, we'll follow the same steps as we did for `blimpy`. # # Go back to your terminal window and make sure you're in the Week 7 folder (use `ls` to make sure you're in the right place - you'll have to do a `cd ..` if you're still in the `blimpy` subfolder). Now type `git clone https://github.com/Sofysicist/turbo_seti.git` (again, you can copy-paste the address from here, or the GitHub page using the `Code` button). 
The code should download, and, once it's finished, you should have a new directory called `turbo_seti` in your Week 7 folder. Use `cd` to navigate into it, and then type `pip install .` to install the code. This install should be even quicker than `blimpy`. # Whew! Alright, now we're ready to import those packages, and jump into the analysis! # *** # ## Let's get started! # Start with our standard command to get plots to show nicely in the notebook: # %matplotlib inline # Now let's import a few useful packages: # # - `pylab` for plotting # - the `Waterfall()` class from `blimpy` for reading in the data # - the `seti_event` module from `turbo_seti` for finding Voyager within our dataset # - the `find_event` module from `turbo_seti` for displaying the results of `seti_event` import pylab as plt from blimpy import Waterfall import turbo_seti.find_doppler.seti_event as turbo import turbo_seti.find_event as find # Now, let's define the filename and read in the observation data using `Waterfall()`: file_path = "VOYAGER_guppi_58202_60970_VOYAGER_0060.gpuspec.0000.h5" obs = Waterfall(file_path) # We can get basic info about the file by reading the filterbank header. The `info()` method will print and format this information nicely: obs.info() # There's a lot of information here, and if you're interested in the specifics, you can check out the [BL data format paper](https://arxiv.org/abs/1906.07391). 
But here's a quick rundown of the important parameters: # # **fch1:** The frequency of the first (maximum) frequency channel in the file (MHz) # # **foff:** The frequency increment of the file - also known as the frequency resolution / channel width / bin size (MHz) # # **nchans:** The number of frequency channels in the file # # **source_name:** The name of the source we were pointed at # # **src_dej:** The J2000 declination of the observation # # **src_raj:** The J2000 right ascension of the observation # # **tsamp:** The sample time / time bin size / time resolution of the file (seconds) # # **tstart (MJD):** The start time of the observation, in Modified Julian Date # # **Num ints in file:** The number of time bins in the file # # The selection info at the bottom kindly tells you the data cube shape and the minimum and maximum frequencies in the observation, derived from the quantities that we just discussed. The data has the shape (16, 1, 1048576), meaning that is has 16 time integrations, and 1048576 channels, over a bandwidth of 2.9 MHz. For the record, that's a lot of channels! The corresponding channel bandwidth is far narrower (Hz) than that required for most astrophysical observations (kHz). # # The header and data can also be accessed directly as objects in the `Waterfall()` class using `obs.header` and `obs.data`. # # Finding the Signal # We're going to use the Breakthrough Listen narrowband signal search software `turboSETI` to search for potential "hits" in this file: bright spikes at single frequency bins. Finding a statistically-significant narrowband "hit" is the first step to finding ETI with this method. # <span style="color:red">We know that astrophysical objects will never produce narrowband signals - so why don't we immediately celebrate when we see one in our data?</span> (hint: there are many valid answers) # *** # `turboSETI` can be run on `.h5` or `.fil` files, and luckily our Voyager observation is already an `.h5`. 
When we perform the search for hits with the `turbo.FindDoppler` function, we will have to specify the minimum signal-to-noise ratio and the maximum drift rate that we'd like in our search. For a more in-depth analysis of these parameters, check out the Finding_ETI notebook. Briefly: because Voyager is not fixed on the surface of Earth, it has a relative motion with respect to the telescope, which causes its apparent frequency to drift. The `max_drift` parameter specifies the maximum drift rate for the search with a unit of Hz/sec. `snr` sets the signal-to-noise ratio threshold for a spike to be considered a hit and added to the output table. The values that have been chosen below are tuned to make the rest of the notebook run smoothly (because in the case of Voyager, we actually know what we're looking for!) but feel free to play with them if you're curious. # # This is the most computationally-heavy step. For our small example file, it can still take up to a minute to run the next step - be patient! # # If you want to run this cell again for any reason, be sure to *remove the previous output files* - they will not overwrite properly. # + file = "VOYAGER_guppi_58202_60970_VOYAGER_0060.gpuspec.0000.h5" max_drift_rate = 5 #Hz/s signal_to_noise = 20 find_seti_event = turbo.FindDoppler(file, max_drift=max_drift_rate, snr=signal_to_noise) find_seti_event.search() # - # First thing's first: let's see what the results of the search were! These results were saved in a `.dat` file written to your Week7 folder. We can investigate what's in that file by reading it into a `pandas` dataframe with the `read_dat` function below: find.read_dat("VOYAGER_guppi_58202_60970_VOYAGER_0060.gpuspec.0000.dat") # There's a lot of information stored in the rows of this table, but for now just remember the values in the `Freq` column - those values are where we should expect to see something in the following plots. 
# ## Plotting the data # Now let's hop back to `blimpy` for the plotting, so we can visualize the data that produced the events in the table. The `Waterfall()` class provides a `plot_spectrum()` function to plot the data: obs.plot_spectrum(logged=True) # <span style="color:red">Do those spikes in the data seem to line up with what was in the `pandas` dataframe above?</span> # *** # What we see in the plot above is the power spectral density (or simply "spectrum") of the data contained in the filterbank file. The `logged=True` tells the plotting script to display the signal on a decibel scale, which makes it easier to see high dynamic range. # # Apparent in the plot are the edges of the digital filters (the rolloff on the far left and far right edges), and a big spike in power in the centre. This is known as the "DC bin", and is an artifact caused by the use of the Fast Fourier Transform (FFT) to create spectra. # # The code reports that it's `extracting integration 0` because it is only plotting the first time bin of the file - even with only an 18 second integration on the Green Bank Telescope, we can clearly see Voyager's transmission! # # Now let's zoom in on the part of the plot where Voyager appears, to get a better sense for what the signal structure looks like. Use the values from your table and the plot above to decide on a reasonable `f_start` and `f_stop` for the zoomed-in plot below. The baseline region around the signal looks pretty flat, so if we exclude the DC bin from our plot, we won't need to use `logged=True` for the next plot. f_start = #your value here! #MHz f_stop = #your value here! #MHz obs.plot_spectrum(f_start=f_start, f_stop=f_stop) # If you see the triple-spike feature, you found the telemetry signal from Voyager 1! What you see is the carrier (center) and two sidebands that carry the data. # # We can zoom in on these to see even more features. 
Choose your frequency limits for each of the three following plots, and play with them to get a good look at the three parts of the signal. You should be able to see some interesting structure in the sidebands. # + left_start = #your value here! #MHz left_stop = #your value here! #MHz center_start = #your value here! #MHz center_stop = #your value here! #MHz right_start = #your value here! #MHz right_stop = #your value here! #MHz plt.figure(figsize=(8, 6)) plt.subplot(3,1,1) obs.plot_spectrum(f_start=left_start, f_stop=left_stop) # left sideband plt.subplot(3,1,2) obs.plot_spectrum(f_start=center_start, f_stop=center_stop) # carrier plt.subplot(3,1,3) obs.plot_spectrum(f_start=right_start, f_stop=right_stop) # right sideband plt.tight_layout() # - # In radio telecommunications, a "carrier wave" is an extremely bright, non-information-carrying signal that helps the receiver locate the wide-band but less luminous "sidebands", which are the amplitude-modulated or frequency-modulated regions that actually carry the information (in this case, Voyager's position, velocity, instrument readings etc.). # Finally, let's look at the entire observation instead of just the first 18 seconds. We can see this by plotting what's known as a 'waterfall plot', or dynamic spectrum, which shows the spectrum as a function of time. I recommend that you look at one of the side-bands in the frequency bounds below, because the behaviour is easier to see in the wider signal. start_f_wat = #your value here! #MHz stop_f_wat = #your value here! #MHz obs.plot_waterfall(f_start=start_f_wat, f_stop=stop_f_wat) # In this plot, the two axes are frequency channel and time (running from bottom to top in this example!), and the color corresponds to power level. When NASA communicates with the spacecraft, it needs to take into account the "Doppler drift rate", or the fact that the signal drifts across frequencies over the course of the observation. 
In this observation, that's mostly due to the fact that the Earth was rotating away from Voyager as we observed! Likewise, when Breakthrough Listen is searching for SETI signals, we need to search across a range of different doppler drift rates to maximize the signal-to-noise of a potential signal, for which we won't know the actual drift rate. # <span style="color:red">How do you know, from just this plot, that the Earth was rotating away from Voyager and not towards it?</span> # *** # ## Discussion # Let's take a moment to pause and reflect. This faint signal has travelled billions of miles through space, back to us here on Earth. Launching this probe out of the Solar System was an immense feat, and the fact that we can detect it still, 43 years after its launch, is even more impressive. # # The signals from probes such as Voyager are very different from the signals we measure from astrophysical objects. In Breakthrough Listen, we use this clear difference between natural and artificial signals in our approach for detecting extraterrestrial signals. # # CONGRATULATIONS! You have finished this tutorial :)
Week7_SETI/Finding_Voyager/Finding_Voyager.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.5 64-bit (''base'': conda)' # name: python3 # --- # + from src.data.dataloaders import GMC_DataLoader, NIH_DataLoader, ENV_DataLoader # %load_ext autoreload # %autoreload 2 # - # ## DataLoader notebook # # This notebook is intended to show the multiple environments and datasets studied in this project. In particular we started from the NIH and GMC datasets, which both provide a large number of labeled chest X-ray images for pneumonia prediction. # + train_loaders = [ENV_DataLoader(batch_size=4, shuffle=True, num_workers=0, pin_memory=True, stage = "train", type = i+1) for i in range(2)] test_loader = ENV_DataLoader(batch_size=4, shuffle=True, num_workers=0, pin_memory=True, stage = "test") # - train_loaders[0].show(iter = 1, type="NORMAL") test_loader.show(iter = 1, type="PNEUMONIA")
notebooks/dataloader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false editable=false nbgrader={"checksum": "346b99c4a9f878e9aec2a07d3ddc08d1", "grade": false, "grade_id": "cell-be955a21a9add6d6", "locked": true, "schema_version": 1, "solution": false} # # Assignment: Dyna-Q and Dyna-Q+ # + [markdown] deletable=false editable=false nbgrader={"checksum": "ee25bdba2a442f7c9bb323036ff7adff", "grade": false, "grade_id": "cell-6ab578539f713801", "locked": true, "schema_version": 1, "solution": false} # Welcome to this programming assignment! In this notebook, you will: # 1. implement the Dyna-Q and Dyna-Q+ algorithms. # 2. compare their performance on an environment which changes to become 'better' than it was before, that is, the task becomes easier. # # We will give you the environment and infrastructure to run the experiment and visualize the performance. The assignment will be graded automatically by comparing the behavior of your agent to our implementations of the algorithms. The random seed will be set explicitly to avoid different behaviors due to randomness. # # Please go through the cells in order. # + [markdown] deletable=false editable=false nbgrader={"checksum": "93057554e87db5c7a4946ae3ba2d5951", "grade": false, "grade_id": "cell-9da2a3b80d5b1de4", "locked": true, "schema_version": 1, "solution": false} # ## The Shortcut Maze Environment # # In this maze environment, the goal is to reach the goal state (G) as fast as possible from the starting state (S). There are four actions – up, down, right, left – which take the agent deterministically from a state to the corresponding neighboring states, except when movement is blocked by a wall (denoted by grey) or the edge of the maze, in which case the agent remains where it is. The reward is +1 on reaching the goal state, 0 otherwise. 
# On reaching the goal state G, the agent returns to the start state S to begin a new episode. This is a discounted, episodic task with $\gamma = 0.95$. # # <img src="./images/shortcut_env.png" alt="environment" width="400"/> # # Later in the assignment, we will use a variant of this maze in which a 'shortcut' opens up after a certain number of timesteps. We will test if the Dyna-Q and Dyna-Q+ agents are able to find the newly-opened shorter route to the goal state. # + [markdown] deletable=false editable=false nbgrader={"checksum": "f3979f4d8454c9e24f16ed133d08f086", "grade": false, "grade_id": "cell-d789ad9af5d734f5", "locked": true, "schema_version": 1, "solution": false} # ## Packages # # We import the following libraries that are required for this assignment. Primarily, we shall be using the following libraries: # 1. numpy: the fundamental package for scientific computing with Python. # 2. matplotlib: the library for plotting graphs in Python. # 3. RL-Glue: the library for reinforcement learning experiments. # # **Please do not import other libraries** — this will break the autograder.
# + deletable=false editable=false nbgrader={"checksum": "16cd32791a2feb655dba393a8ec8e10d", "grade": false, "grade_id": "cell-9b59a5e962944c1a", "locked": true, "schema_version": 1, "solution": false} # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import os, jdc, shutil from tqdm import tqdm from rl_glue import RLGlue from agent import BaseAgent from maze_env import ShortcutMazeEnvironment # + deletable=false editable=false nbgrader={"checksum": "2a42e5b7f720fd0165a2b12b4c99f164", "grade": false, "grade_id": "cell-70ba6356f71f04d6", "locked": true, "schema_version": 1, "solution": false} plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'figure.figsize': [8,5]}) # + [markdown] deletable=false editable=false nbgrader={"checksum": "d9b325128e6aa23ac37b7af5193ab516", "grade": false, "grade_id": "cell-337c52496760f99a", "locked": true, "schema_version": 1, "solution": false} # ## Section 1: Dyna-Q # + [markdown] deletable=false editable=false nbgrader={"checksum": "663ef3e7cbb35c2f7bcc99e48697e20c", "grade": false, "grade_id": "cell-ae016536341366d9", "locked": true, "schema_version": 1, "solution": false} # Let's start with a quick recap of the tabular Dyna-Q algorithm. # # <div style="width:80%"><img src="./images/DynaQ.png" alt="DynaQ_pseudocode"></div> # # Dyna-Q involves four basic steps: # 1. Action selection: given an observation, select an action to be performed (here, using the $\epsilon$-greedy method). # 2. Direct RL: using the observed next state and reward, update the action values (here, using one-step tabular Q-learning). # 3. Model learning: using the observed next state and reward, update the model (here, updating a table as the environment is assumed to be deterministic). # 4. Planning: update the action values by generating $n$ simulated experiences using certain starting states and actions (here, using the random-sample one-step tabular Q-planning method). This is also known as the 'Indirect RL' step. 
The process of choosing the state and action to simulate an experience with is known as 'search control'. # # Steps 1 and 2 are parts of the [tabular Q-learning algorithm](http://www.incompleteideas.net/book/RLbook2018.pdf#page=153) and are denoted by line numbers (a)–(d) in the pseudocode above. Step 3 is performed in line (e), and Step 4 in the block of lines (f). # # We highly recommend revising the Dyna videos in the course and the material in the RL textbook (in particular, [Section 8.2](http://www.incompleteideas.net/book/RLbook2018.pdf#page=183)). # + [markdown] deletable=false editable=false nbgrader={"checksum": "354d65ba93c059d3100345383340a535", "grade": false, "grade_id": "cell-31575fcfa515756a", "locked": true, "schema_version": 1, "solution": false} # Alright, let's begin coding. # # As you already know by now, you will develop an agent which interacts with the given environment via RL-Glue. More specifically, you will implement the usual methods `agent_start`, `agent_step`, and `agent_end` in your `DynaQAgent` class, along with a couple of helper methods specific to Dyna-Q, namely `update_model` and `planning_step`. We will provide detailed comments in each method describing what your code should do. # + [markdown] deletable=false editable=false nbgrader={"checksum": "3aae482d1f386d62c362697ec771bf43", "grade": false, "grade_id": "cell-85f53bb3ebb9f77d", "locked": true, "schema_version": 1, "solution": false} # Let's break this down in pieces and do it one-by-one. # # First of all, check out the `agent_init` method below. As in earlier assignments, some of the attributes are initialized with the data passed inside `agent_info`. In particular, pay attention to the attributes which are new to `DynaQAgent`, since you shall be using them later. 
# + deletable=false editable=false nbgrader={"checksum": "cb3caf80070a7cce0f65a119522d2dbd", "grade": false, "grade_id": "cell-eda7a35e5ff3252f", "locked": true, "schema_version": 1, "solution": false} # Do not modify this cell! class DynaQAgent(BaseAgent): def agent_init(self, agent_info): """Setup for the agent called when the experiment first starts. Args: agent_init_info (dict), the parameters used to initialize the agent. The dictionary contains: { num_states (int): The number of states, num_actions (int): The number of actions, epsilon (float): The parameter for epsilon-greedy exploration, step_size (float): The step-size, discount (float): The discount factor, planning_steps (int): The number of planning steps per environmental interaction random_seed (int): the seed for the RNG used in epsilon-greedy planning_random_seed (int): the seed for the RNG used in the planner } """ # First, we get the relevant information from agent_info # NOTE: we use np.random.RandomState(seed) to set the two different RNGs # for the planner and the rest of the code try: self.num_states = agent_info["num_states"] self.num_actions = agent_info["num_actions"] except: print("You need to pass both 'num_states' and 'num_actions' \ in agent_info to initialize the action-value table") self.gamma = agent_info.get("discount", 0.95) self.step_size = agent_info.get("step_size", 0.1) self.epsilon = agent_info.get("epsilon", 0.1) self.planning_steps = agent_info.get("planning_steps", 10) self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42)) self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42)) # Next, we initialize the attributes required by the agent, e.g., q_values, model, etc. # A simple way to implement the model is to have a dictionary of dictionaries, # mapping each state to a dictionary which maps actions to (reward, next state) tuples. 
self.q_values = np.zeros((self.num_states, self.num_actions)) self.actions = list(range(self.num_actions)) self.past_action = -1 self.past_state = -1 self.model = {} # model is a dictionary of dictionaries, which maps states to actions to # (reward, next_state) tuples # + [markdown] deletable=false editable=false nbgrader={"checksum": "c8f49f199eb2c0bf11cba86f0b5d3c6e", "grade": false, "grade_id": "cell-02a81d97cf066622", "locked": true, "schema_version": 1, "solution": false} # Now let's create the `update_model` method, which performs the 'Model Update' step in the pseudocode. It takes a `(s, a, s', r)` tuple and stores the next state and reward corresponding to a state-action pair. # # Remember, because the environment is deterministic, an easy way to implement the model is to have a dictionary of encountered states, each mapping to a dictionary of actions taken in those states, which in turn maps to a tuple of next state and reward. In this way, the model can be easily accessed by `model[s][a]`, which would return the `(s', r)` tuple. 
# + deletable=false nbgrader={"checksum": "0623843f71cad67461cc0b5dd7b1751c", "grade": false, "grade_id": "cell-ca7cf60690bd1e62", "locked": false, "schema_version": 1, "solution": true} # %%add_to DynaQAgent # [GRADED] def update_model(self, past_state, past_action, state, reward): """updates the model Args: past_state (int): s past_action (int): a state (int): s' reward (int): r Returns: Nothing """ # Update the model with the (s,a,s',r) tuple (1~4 lines) ### START CODE HERE ### if past_state not in self.model: self.model.update({past_state:{past_action:(state, reward)}}) else: self.model[past_state].update({past_action:(state, reward)}) # self.model[past_state][past_action] = (state, reward) ### END CODE HERE ### # + [markdown] deletable=false editable=false nbgrader={"checksum": "a67870254bf6049b72911dcbe495ad84", "grade": false, "grade_id": "cell-feda394cc8d0d0f0", "locked": true, "schema_version": 1, "solution": false} # ### Test `update_model()` # + deletable=false editable=false nbgrader={"checksum": "4f4aeb91268cae5d6169083963dd4460", "grade": true, "grade_id": "DynaQ_update_model", "locked": true, "points": 5, "schema_version": 1, "solution": false} # Do not modify this cell! 
## Test code for update_model() ##

actions = []

agent_info = {"num_actions": 4, 
              "num_states": 3, 
              "epsilon": 0.1, 
              "step_size": 0.1, 
              "discount": 1.0, 
              "random_seed": 0,
              "planning_random_seed": 0}
test_agent = DynaQAgent()
test_agent.agent_init(agent_info)

test_agent.update_model(0,2,0,1)
test_agent.update_model(2,0,1,1)
test_agent.update_model(0,3,1,2)

print("Model: \n", test_agent.model)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "58a644632b6aea7d2850370ab1066b9e", "grade": false, "grade_id": "cell-47eee79fcc885cb6", "locked": true, "schema_version": 1, "solution": false}
# Expected output:
# ```
# Model: 
#  {0: {2: (0, 1), 3: (1, 2)}, 2: {0: (1, 1)}}
# ```

# + [markdown] deletable=false editable=false nbgrader={"checksum": "b49de532a3a274adce7ebf6169961296", "grade": false, "grade_id": "cell-21e26a912d8b58f2", "locked": true, "schema_version": 1, "solution": false}
# Next, you will implement the planning step, the crux of the Dyna-Q algorithm. You shall be calling this `planning_step` method at every timestep of every trajectory.

# + deletable=false nbgrader={"checksum": "cffbe7372919f7569a283aad349cc76c", "grade": false, "grade_id": "cell-299f48859b38c670", "locked": false, "schema_version": 1, "solution": true}
# %%add_to DynaQAgent

# [GRADED]

def planning_step(self):
    """Perform planning, i.e. indirect RL, for `self.planning_steps` iterations.

    Args:
        None
    Returns:
        Nothing
    """
    # The indirect RL step:
    # - Choose a state and action from the set of experiences that are stored in the model. (~2 lines)
    # - Query the model with this state-action pair for the predicted next state and reward.(~1 line)
    # - Update the action values with this simulated experience.                            (2~4 lines)
    # - Repeat for the required number of planning steps.
    #
    # Note that the update equation is different for terminal and non-terminal transitions. 
    # To differentiate between a terminal and a non-terminal next state, assume that the model stores
    # the terminal state as a dummy state like -1
    #
    # Important: remember you have a random number generator 'planning_rand_generator' as
    #     a part of the class which you need to use as self.planning_rand_generator.choice()
    #     For the sake of reproducibility and grading, *do not* use anything else like
    #     np.random.choice() for performing search control.

    ### START CODE HERE ###
    # Each planning iteration replays one simulated transition drawn from the model.
    for _ in range(self.planning_steps):
        # Search control: sample a previously visited state, then one of the
        # actions recorded for it (must use planning_rand_generator for grading).
        state = self.planning_rand_generator.choice(list(self.model.keys()))
        action = self.planning_rand_generator.choice(list(self.model[state].keys()))
        next_state, model_reward = self.model[state][action]
        # A dummy next state of -1 marks a terminal transition: no bootstrap term.
        if next_state == -1:
            target = model_reward
        else:
            target = model_reward + self.gamma * np.max(self.q_values[next_state])
        # Standard Q-learning update on the simulated experience.
        self.q_values[state, action] += self.step_size * (target - self.q_values[state, action])
    ### END CODE HERE ###

# + [markdown] deletable=false editable=false nbgrader={"checksum": "022cff5daeb2a0885bec5f66231b0233", "grade": false, "grade_id": "cell-deb5f5adef22b4e0", "locked": true, "schema_version": 1, "solution": false}
# ### Test `planning_step()`

# + deletable=false editable=false nbgrader={"checksum": "e7b50b6709c1796e9649880aa88b4fcc", "grade": true, "grade_id": "DynaQ_planning_step", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# Do not modify this cell!
## Test code for planning_step() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "planning_steps": 4, "random_seed": 0, "planning_random_seed": 5} test_agent = DynaQAgent() test_agent.agent_init(agent_info) test_agent.update_model(0,2,1,1) test_agent.update_model(2,0,1,1) test_agent.update_model(0,3,0,1) test_agent.update_model(0,1,-1,1) test_agent.planning_step() print("Model: \n", test_agent.model) print("Action-value estimates: \n", test_agent.q_values) # + [markdown] deletable=false editable=false nbgrader={"checksum": "cafc07b98a92e3fd29e912aca8a1d2d3", "grade": false, "grade_id": "cell-2b479d946144873d", "locked": true, "schema_version": 1, "solution": false} # Expected output: # ``` # Model: # {0: {2: (1, 1), 3: (0, 1), 1: (-1, 1)}, 2: {0: (1, 1)}} # Action-value estimates: # [[0. 0.1 0. 0.2 ] # [0. 0. 0. 0. ] # [0.1 0. 0. 0. ]] # ``` # # If your output does not match the above, one of the first things to check is to make sure that you haven't changed the `planning_random_seed` in the test cell. Additionally, make sure you have handled terminal updates correctly. # + [markdown] deletable=false editable=false nbgrader={"checksum": "143ef17b6c34ca12c1528346ad7af4d0", "grade": false, "grade_id": "cell-19299355538e166b", "locked": true, "schema_version": 1, "solution": false} # Now before you move on to implement the rest of the agent methods, here are the helper functions that you've used in the previous assessments for choosing an action using an $\epsilon$-greedy policy. # + deletable=false editable=false nbgrader={"checksum": "1ab2ef4f11b78d87270f396028252b74", "grade": false, "grade_id": "cell-9fc1453e5bc78ee0", "locked": true, "schema_version": 1, "solution": false} # %%add_to DynaQAgent # Do not modify this cell! 
def argmax(self, q_values):
    """argmax with random tie-breaking

    Args:
        q_values (Numpy array): the array of action values
    Returns:
        action (int): an action with the highest value
    """
    top = float("-inf")
    ties = []

    for i in range(len(q_values)):
        # A strictly greater value resets the tie list; the equality check below
        # then appends i, because q_values[i] == top right after the reset.
        if q_values[i] > top:
            top = q_values[i]
            ties = []

        if q_values[i] == top:
            ties.append(i)

    # Break ties uniformly at random with the agent's seeded RNG (reproducible).
    return self.rand_generator.choice(ties)

def choose_action_egreedy(self, state):
    """returns an action using an epsilon-greedy policy w.r.t. the current action-value function.

    Important: assume you have a random number generator 'rand_generator' as a part of the class
                which you can use as self.rand_generator.choice() or self.rand_generator.rand()

    Args:
        state (List): coordinates of the agent (two elements)
    Returns:
        The action taken w.r.t. the aforementioned epsilon-greedy policy
    """
    # Explore with probability epsilon; otherwise act greedily (ties broken randomly).
    if self.rand_generator.rand() < self.epsilon:
        action = self.rand_generator.choice(self.actions)
    else:
        values = self.q_values[state]
        action = self.argmax(values)

    return action

# + [markdown] deletable=false editable=false nbgrader={"checksum": "f836d363c2f53ad316e4d3be6958e543", "grade": false, "grade_id": "cell-49b0839e77c116f3", "locked": true, "schema_version": 1, "solution": false}
# Next, you will implement the rest of the agent-related methods, namely `agent_start`, `agent_step`, and `agent_end`.

# + deletable=false nbgrader={"checksum": "3d5e4eadfae22a3707932558a68f90f2", "grade": false, "grade_id": "cell-d7cd67287d3a4d59", "locked": false, "schema_version": 1, "solution": true}
# %%add_to DynaQAgent

# [GRADED]

def agent_start(self, state):
    """The first method called when the experiment starts, called after
    the environment starts.

    Args:
        state (Numpy array): the state from the
            environment's env_start function.
    Returns:
        (int) the first action the agent takes.
    """
    # given the state, select the action using self.choose_action_egreedy()), 
    # and save current state and action (~2 lines)
    ### self.past_state = ?
    ### self.past_action = ?

    ### START CODE HERE ###
    # No reward yet on the first step: just pick an action and remember (s, a)
    # so agent_step can learn from the first real transition.
    self.past_action = self.choose_action_egreedy(state)
    self.past_state = state
    ### END CODE HERE ###

    return self.past_action

def agent_step(self, reward, state):
    """A step taken by the agent.

    Args:
        reward (float): the reward received for taking the last action taken
        state (Numpy array): the state from the
            environment's step based on where the agent ended up after the
            last step
    Returns:
        (int) The action the agent takes given this state.
    """
    # - Direct-RL step (~1-3 lines)
    # - Model Update step (~1 line)
    # - `planning_step` (~1 line)
    # - Action Selection step (~1 line)
    # Save the current state and action before returning the action to be performed. (~2 lines)

    ### START CODE HERE ###
    # Direct RL: one Q-learning update on the real transition (s, a) -> (state, reward).
    s,a = self.past_state, self.past_action
    target = reward + self.gamma*np.max(self.q_values[state])
    self.q_values[s, a] = self.q_values[s,a] + self.step_size*(target - self.q_values[s, a])
    # Learn the model from the same real experience, then plan from it.
    self.update_model(s,a, state, reward)
    self.planning_step()
    # Select and remember the next action (epsilon-greedy).
    action = self.choose_action_egreedy(state)
    self.past_state = state
    self.past_action = action
    ### END CODE HERE ###

    return self.past_action

def agent_end(self, reward):
    """Called when the agent terminates.

    Args:
        reward (float): the reward the agent received for entering the
            terminal state.
    """
    # - Direct RL update with this final transition (1~2 lines)
    # - Model Update step with this final transition (~1 line)
    # - One final `planning_step` (~1 line)
    #
    # Note: the final transition needs to be handled carefully. Since there is no next state, 
    # you will have to pass a dummy state (like -1), which you will be using in the planning_step() to
    # differentiate between updates with usual terminal and non-terminal transitions.

    ### START CODE HERE ###
    # Terminal transition: the target is just the reward (no bootstrap term).
    s,a = self.past_state, self.past_action
    target = reward
    self.q_values[s, a] = self.q_values[s,a] + self.step_size*(target - self.q_values[s, a])
    # Store the transition with dummy next state -1 so planning_step treats it as terminal.
    self.update_model(s,a, -1, reward)
    self.planning_step()
    ### END CODE HERE ###

# + [markdown] deletable=false editable=false nbgrader={"checksum": "14b7a6eacec0a3564a156c15ed4cc96c", "grade": false, "grade_id": "cell-cb04070be7e98178", "locked": true, "schema_version": 1, "solution": false}
# ### Test `agent_start()`

# + deletable=false editable=false nbgrader={"checksum": "241515d8eb09b5d0ca96c1c60a24af07", "grade": true, "grade_id": "DynaQ_agent_start", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# Do not modify this cell!

## Test code for agent_start() ##

agent_info = {"num_actions": 4, 
              "num_states": 3, 
              "epsilon": 0.1, 
              "step_size": 0.1, 
              "discount": 1.0, 
              "random_seed": 0,
              "planning_random_seed": 0}
test_agent = DynaQAgent()
test_agent.agent_init(agent_info)

action = test_agent.agent_start(0)

print("Action:", action)
print("Model: \n", test_agent.model)
print("Action-value estimates: \n", test_agent.q_values)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "f2c06652b1c989ff3174e42a463173ac", "grade": false, "grade_id": "cell-bc7046affcf9c2f9", "locked": true, "schema_version": 1, "solution": false}
# Expected output:
# ```
# Action: 1
# Model: 
#  {}
# Action-value estimates: 
#  [[0. 0. 0. 0.]
#  [0. 0. 0. 0.]
#  [0. 0. 0. 0.]]
# ```

# + [markdown] deletable=false editable=false nbgrader={"checksum": "b5b76c8ebc0936d8ca8b929d1721fe44", "grade": false, "grade_id": "cell-069a254ee4ba6e25", "locked": true, "schema_version": 1, "solution": false}
# ### Test `agent_step()`

# + deletable=false editable=false nbgrader={"checksum": "24bf8b0fdf3408e6a36ea968df0c6a36", "grade": true, "grade_id": "DynaQ__agent_step", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# Do not modify this cell!
## Test code for agent_step() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "planning_steps": 2, "random_seed": 0, "planning_random_seed": 0} test_agent = DynaQAgent() test_agent.agent_init(agent_info) actions.append(test_agent.agent_start(0)) actions.append(test_agent.agent_step(1,2)) actions.append(test_agent.agent_step(0,1)) print("Actions:", actions) print("Model: \n", test_agent.model) print("Action-value estimates: \n", test_agent.q_values) # + [markdown] deletable=false editable=false nbgrader={"checksum": "a47eae38f5b82056b69ea71037a76465", "grade": false, "grade_id": "cell-0b8605acd440fc7d", "locked": true, "schema_version": 1, "solution": false} # Expected output: # # ``` # Actions: [1, 3, 1] # Model: # {0: {1: (2, 1)}, 2: {3: (1, 0)}} # Action-value estimates: # [[0. 0.3439 0. 0. ] # [0. 0. 0. 0. ] # [0. 0. 0. 0. ]] # ``` # + [markdown] deletable=false editable=false nbgrader={"checksum": "c1fee78afea91645becfd5b193b4b9ab", "grade": false, "grade_id": "cell-8d3e06f7f489a49c", "locked": true, "schema_version": 1, "solution": false} # ### Test `agent_end()` # + deletable=false editable=false nbgrader={"checksum": "b8e64a1aa1ab9917e78149b1ab4fc9e8", "grade": true, "grade_id": "DynaQ_agent_end", "locked": true, "points": 5, "schema_version": 1, "solution": false} # Do not modify this cell! 
## Test code for agent_end() ## actions = [] agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "planning_steps": 2, "random_seed": 0, "planning_random_seed": 0} test_agent = DynaQAgent() test_agent.agent_init(agent_info) actions.append(test_agent.agent_start(0)) actions.append(test_agent.agent_step(1,2)) actions.append(test_agent.agent_step(0,1)) test_agent.agent_end(1) print("Actions:", actions) print("Model: \n", test_agent.model) print("Action-value Estimates: \n", test_agent.q_values) # + [markdown] deletable=false editable=false nbgrader={"checksum": "43711ecd45a75f4ee1f31ac3f5127477", "grade": false, "grade_id": "cell-25bdfd8dc303b1e7", "locked": true, "schema_version": 1, "solution": false} # Expected output: # # ``` # Actions: [1, 3, 1] # Model: # {0: {1: (2, 1)}, 2: {3: (1, 0)}, 1: {1: (-1, 1)}} # Action-value Estimates: # [[0. 0.41051 0. 0. ] # [0. 0.1 0. 0. ] # [0. 0. 0. 0.01 ]] # # ``` # + [markdown] deletable=false editable=false nbgrader={"checksum": "12f59fa0e03a4b5df596a73e3dae0c02", "grade": false, "grade_id": "cell-329423dc3230312d", "locked": true, "schema_version": 1, "solution": false} # ### Experiment: Dyna-Q agent in the maze environment # # Alright. Now we have all the components of the `DynaQAgent` ready. Let's try it out on the maze environment! # # The next cell runs an experiment on this maze environment to test your implementation. The initial action values are $0$, the step-size parameter is $0.125$. and the exploration parameter is $\epsilon=0.1$. After the experiment, the sum of rewards in each episode should match the correct result. # # We will try planning steps of $0,5,50$ and compare their performance in terms of the average number of steps taken to reach the goal state in the aforementioned maze environment. For scientific rigor, we will run each experiment $30$ times. 
# In each experiment, we set the initial random-number-generator (RNG) seeds for a fair comparison across algorithms.

# + deletable=false editable=false nbgrader={"checksum": "a2573b31bd8cce3a89beba2ec09ab2c3", "grade": false, "grade_id": "cell-28355ff7447c705f", "locked": true, "schema_version": 1, "solution": false}
# Do not modify this cell!

def run_experiment(env, agent, env_parameters, agent_parameters, exp_parameters):
    """Run the Dyna-Q maze experiment over several planning-step settings.

    For each value in agent_parameters['planning_steps'], performs `num_runs`
    independent runs of `num_episodes` episodes, records the number of
    environment steps per episode, and saves everything (a dict of arrays)
    to results/Dyna-Q_planning_steps.npy.
    """
    import time  # stdlib; used only for the brief pause below

    # Experiment settings
    num_runs = exp_parameters['num_runs']
    num_episodes = exp_parameters['num_episodes']
    planning_steps_all = agent_parameters['planning_steps']

    env_info = env_parameters
    agent_info = {"num_states" : agent_parameters["num_states"],    # We pass the agent the information it needs.
                  "num_actions" : agent_parameters["num_actions"],
                  "epsilon": agent_parameters["epsilon"],
                  "discount": env_parameters["discount"],
                  "step_size" : agent_parameters["step_size"]}

    all_averages = np.zeros((len(planning_steps_all), num_runs, num_episodes))  # for collecting metrics
    log_data = {'planning_steps_all' : planning_steps_all}                      # that shall be plotted later

    for idx, planning_steps in enumerate(planning_steps_all):
        print('Planning steps : ', planning_steps)
        # FIX: was os.system('sleep 0.5'), which spawns a shell and silently does
        # nothing on Windows. time.sleep is portable and has the same intent here:
        # keep tqdm from printing out-of-order before the print() above.
        time.sleep(0.5)
        agent_info["planning_steps"] = planning_steps

        for i in tqdm(range(num_runs)):
            # Seed both RNGs identically per run for a fair comparison across settings.
            agent_info['random_seed'] = i
            agent_info['planning_random_seed'] = i

            rl_glue = RLGlue(env, agent)           # Creates a new RLGlue experiment with the env and agent we chose above
            rl_glue.rl_init(agent_info, env_info)  # We pass RLGlue what it needs to initialize the agent and environment

            for j in range(num_episodes):
                rl_glue.rl_start()                 # We start an episode. Here we aren't using rl_glue.rl_episode()
                                                   # like the other assessments because we'll be requiring some
                is_terminal = False                # data from within the episodes in some of the experiments here
                num_steps = 0
                while not is_terminal:
                    reward, _, action, is_terminal = rl_glue.rl_step()  # The environment and agent take a step
                    num_steps += 1                                      # and return the reward and action taken.

                all_averages[idx][i][j] = num_steps

    log_data['all_averages'] = all_averages
    np.save("results/Dyna-Q_planning_steps", log_data)


def plot_steps_per_episode(file_path):
    """Plot the mean steps-per-episode curve for each planning-step setting."""
    # FIX: np.save above stored a Python dict (wrapped in an object array), so
    # loading it back requires allow_pickle=True on NumPy >= 1.16.3.
    data = np.load(file_path, allow_pickle=True).item()
    all_averages = data['all_averages']
    planning_steps_all = data['planning_steps_all']

    for i, planning_steps in enumerate(planning_steps_all):
        # Average over the runs axis to get one curve per planning-step setting.
        plt.plot(np.mean(all_averages[i], axis=0), label='Planning steps = '+str(planning_steps))

    plt.legend(loc='upper right')
    plt.xlabel('Episodes')
    plt.ylabel('Steps\nper\nepisode', rotation=0, labelpad=40)
    plt.axhline(y=16, linestyle='--', color='grey', alpha=0.4)  # minimum steps under the optimal greedy policy
    plt.show()

# +
# Do NOT modify the parameter settings.
# Experiment parameters experiment_parameters = { "num_runs" : 30, # The number of times we run the experiment "num_episodes" : 40, # The number of episodes per experiment } # Environment parameters environment_parameters = { "discount": 0.95, } # Agent parameters agent_parameters = { "num_states" : 54, "num_actions" : 4, "epsilon": 0.1, "step_size" : 0.125, "planning_steps" : [0, 5, 50] # The list of planning_steps we want to try } current_env = ShortcutMazeEnvironment # The environment current_agent = DynaQAgent # The agent run_experiment(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters) plot_steps_per_episode('results/Dyna-Q_planning_steps.npy') shutil.make_archive('results', 'zip', 'results'); # + [markdown] deletable=false editable=false nbgrader={"checksum": "b78255378e333ad6a4b69eadb1c5bce9", "grade": false, "grade_id": "cell-e55bf393c9e5a94b", "locked": true, "schema_version": 1, "solution": false} # What do you notice? # # As the number of planning steps increases, the number of episodes taken to reach the goal decreases rapidly. Remember that the RNG seed was set the same for all the three values of planning steps, resulting in the same number of steps taken to reach the goal in the first episode. Thereafter, the performance improves. The slowest improvement is when there are $n=0$ planning steps, i.e., for the non-planning Q-learning agent, even though the step size parameter was optimized for it. Note that the grey dotted line shows the minimum number of steps required to reach the goal state under the optimal greedy policy. 
# # --- # # + [markdown] deletable=false editable=false nbgrader={"checksum": "38982e501241998792ae2aebe105c47b", "grade": false, "grade_id": "cell-56f6a9492acc5115", "locked": true, "schema_version": 1, "solution": false} # ### Experiment(s): Dyna-Q agent in the _changing_ maze environment # + [markdown] deletable=false editable=false nbgrader={"checksum": "28e53909e26c756c30000e1a0f0d9b4c", "grade": false, "grade_id": "cell-64cbd79abcf74fce", "locked": true, "schema_version": 1, "solution": false} # Great! Now let us see how Dyna-Q performs on the version of the maze in which a shorter path opens up after 3000 steps. The rest of the transition and reward dynamics remain the same. # # <img src="./images/shortcut_env_after.png" alt="environment" width="800"/> # # Before you proceed, take a moment to think about what you expect to see. Will Dyna-Q find the new, shorter path to the goal? If so, why? If not, why not? # + deletable=false editable=false nbgrader={"checksum": "fd85c37c0082135d539e4160d4e07949", "grade": false, "grade_id": "cell-8f6730285bc8288e", "locked": true, "schema_version": 1, "solution": false} # Do not modify this cell! 
def run_experiment_with_state_visitations(env, agent, env_parameters, agent_parameters, exp_parameters, result_file_name):
    """Run the changing-maze experiment, logging state visitations and cumulative reward.

    Same structure as run_experiment, except the environment opens a shortcut after
    env_parameters['change_at_n'] steps; visitation counts are recorded separately
    before and after that point, and results are saved to results/<result_file_name>.npy.
    """
    import time  # stdlib; used only for the brief pause below

    # Experiment settings
    num_runs = exp_parameters['num_runs']
    num_max_steps = exp_parameters['num_max_steps']
    planning_steps_all = agent_parameters['planning_steps']

    env_info = {"change_at_n" : env_parameters["change_at_n"]}
    agent_info = {"num_states" : agent_parameters["num_states"],
                  "num_actions" : agent_parameters["num_actions"],
                  "epsilon": agent_parameters["epsilon"],
                  "discount": env_parameters["discount"],
                  "step_size" : agent_parameters["step_size"]}

    state_visits_before_change = np.zeros((len(planning_steps_all), num_runs, 54))  # For saving the number of
    state_visits_after_change = np.zeros((len(planning_steps_all), num_runs, 54))   # state-visitations
    cum_reward_all = np.zeros((len(planning_steps_all), num_runs, num_max_steps))   # For saving the cumulative reward
    log_data = {'planning_steps_all' : planning_steps_all}

    for idx, planning_steps in enumerate(planning_steps_all):
        print('Planning steps : ', planning_steps)
        # FIX: was os.system('sleep 1'), which spawns a shell and silently does
        # nothing on Windows; time.sleep is portable and equivalent here (it only
        # prevents tqdm from printing out-of-order before the print() above).
        time.sleep(1)
        agent_info["planning_steps"] = planning_steps  # We pass the agent the information it needs.

        for run in tqdm(range(num_runs)):
            agent_info['random_seed'] = run
            agent_info['planning_random_seed'] = run

            rl_glue = RLGlue(env, agent)           # Creates a new RLGlue experiment with the env and agent we chose above
            rl_glue.rl_init(agent_info, env_info)  # We pass RLGlue what it needs to initialize the agent and environment

            num_steps = 0
            cum_reward = 0

            while num_steps < num_max_steps-1 :
                state, _ = rl_glue.rl_start()  # We start the experiment. We'll be collecting the
                is_terminal = False            # state-visitation counts to visualize the learned policy
                if num_steps < env_parameters["change_at_n"]:
                    state_visits_before_change[idx][run][state] += 1
                else:
                    state_visits_after_change[idx][run][state] += 1

                while not is_terminal and num_steps < num_max_steps-1 :
                    reward, state, action, is_terminal = rl_glue.rl_step()
                    num_steps += 1
                    cum_reward += reward
                    cum_reward_all[idx][run][num_steps] = cum_reward
                    if num_steps < env_parameters["change_at_n"]:
                        state_visits_before_change[idx][run][state] += 1
                    else:
                        state_visits_after_change[idx][run][state] += 1

    log_data['state_visits_before'] = state_visits_before_change
    log_data['state_visits_after'] = state_visits_after_change
    log_data['cum_reward_all'] = cum_reward_all
    np.save("results/" + result_file_name, log_data)


def plot_cumulative_reward(file_path, item_key, y_key, y_axis_label, legend_prefix, title):
    """Plot mean cumulative-reward curves, one per value of the swept parameter."""
    # FIX: the saved object is a Python dict (an object array), so loading it
    # back requires allow_pickle=True on NumPy >= 1.16.3.
    data_all = np.load(file_path, allow_pickle=True).item()
    data_y_all = data_all[y_key]
    items = data_all[item_key]

    for i, item in enumerate(items):
        plt.plot(np.mean(data_y_all[i], axis=0), label=legend_prefix+str(item))

    plt.axvline(x=3000, linestyle='--', color='grey', alpha=0.4)  # timestep at which the environment changes
    plt.xlabel('Timesteps')
    plt.ylabel(y_axis_label, rotation=0, labelpad=60)
    plt.legend(loc='upper left')
    plt.title(title)
    plt.show()

# + [markdown] deletable=false editable=false nbgrader={"checksum": "e463255ee451c8ca255ed3f24dd96bf4", "grade": false, "grade_id": "cell-7a4965729e7c41f3", "locked": true, "schema_version": 1, "solution": false}
# Did you notice that the environment changes after a fixed number of _steps_ and not episodes?
# 
# This is because the environment is separate from the agent, and the environment changes irrespective of the length of each episode (i.e., the number of environmental interactions per episode) that the agent perceives. 
# And hence we are now plotting the data per step or interaction of the agent and the environment, in order to comfortably see the differences in the behaviours of the agents before and after the environment changes.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "e5a10d7810edcc38ed364530e320e5b7", "grade": false, "grade_id": "cell-1585cb7119e3b66d", "locked": true, "schema_version": 1, "solution": false}
# Okay, now we will first plot the cumulative reward obtained by the agent per interaction with the environment, averaged over 10 runs of the experiment on this changing world.

# +
# Do NOT modify the parameter settings.

# Experiment parameters
experiment_parameters = {
    "num_runs" : 10,                     # The number of times we run the experiment
    "num_max_steps" : 6000,              # The number of steps per experiment
}

# Environment parameters
environment_parameters = { 
    "discount": 0.95,
    "change_at_n": 3000
}

# Agent parameters
agent_parameters = {  
    "num_states" : 54,
    "num_actions" : 4, 
    "epsilon": 0.1, 
    "step_size" : 0.125,
    "planning_steps" : [5, 10, 50]      # The list of planning_steps we want to try
}

current_env = ShortcutMazeEnvironment   # The environment
current_agent = DynaQAgent              # The agent

# Results are written to results/Dyna-Q_shortcut_steps.npy by the runner, then plotted.
run_experiment_with_state_visitations(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters, "Dyna-Q_shortcut_steps")
plot_cumulative_reward('results/Dyna-Q_shortcut_steps.npy', 'planning_steps_all', 'cum_reward_all', 'Cumulative\nreward', 'Planning steps = ', 'Dyna-Q : Varying planning_steps')

# + [markdown] deletable=false editable=false nbgrader={"checksum": "2ce0809c6896af47bf2391c654533a42", "grade": false, "grade_id": "cell-bdab2f4622d3890b", "locked": true, "schema_version": 1, "solution": false}
# We observe that the slope of the curves is almost constant. If the agent had discovered the shortcut and begun using it, we would expect to see an increase in the slope of the curves towards the later stages of training. 
# This is because the agent can get to the goal state faster and get the positive reward. Note that the timestep at which the shortcut opens up is marked by the grey dotted line.
# 
# Note that this trend is constant across the increasing number of planning steps.
# 
# Now let's check the heatmap of the state visitations of the agent with `planning_steps=10` during training, before and after the shortcut opens up after 3000 timesteps.

# + deletable=false editable=false nbgrader={"checksum": "2ebb3b3a6fa7c60ee1f35412d3b35e74", "grade": false, "grade_id": "cell-36a0d9e197e4f128", "locked": true, "schema_version": 1, "solution": false}
# Do not modify this cell!

def plot_state_visitations(file_path, plot_titles, idx):
    """Plot before/after heatmaps of average state visitations for configuration `idx`."""
    # FIX: the saved object is a Python dict (an object array), so loading it
    # back requires allow_pickle=True on NumPy >= 1.16.3.
    data = np.load(file_path, allow_pickle=True).item()
    data_keys = ["state_visits_before", "state_visits_after"]
    positions = [211,212]
    titles = plot_titles
    # Wall mask extent: the full wall row before the change, one cell shorter after
    # (the opened cell is the shortcut) — presumably matching the env layout; verify.
    wall_ends = [None,-1]

    for i in range(2):
        state_visits = data[data_keys[i]][idx]
        average_state_visits = np.mean(state_visits, axis=0)
        # Reshape the flat 54-state vector into the 6x9 maze grid, oriented for plotting.
        grid_state_visits = np.rot90(average_state_visits.reshape((6,9)).T)
        grid_state_visits[2,1:wall_ends[i]] = np.nan # walls
        #print(average_state_visits.reshape((6,9)))
        plt.subplot(positions[i])
        plt.pcolormesh(grid_state_visits, edgecolors='gray', linewidth=1, cmap='viridis')
        plt.text(3+0.5, 0+0.5, 'S', horizontalalignment='center', verticalalignment='center')
        plt.text(8+0.5, 5+0.5, 'G', horizontalalignment='center', verticalalignment='center')
        plt.title(titles[i])
        plt.axis('off')
        cm = plt.get_cmap()
        cm.set_bad('gray')  # NaN wall cells render gray

        plt.subplots_adjust(bottom=0.0, right=0.7, top=1.0)
        cax = plt.axes([1., 0.0, 0.075, 1.])
        cbar = plt.colorbar(cax=cax)

    plt.show()

# +
# Do not modify this cell!
plot_state_visitations("results/Dyna-Q_shortcut_steps.npy", ['Dyna-Q : State visitations before the env changes', 'Dyna-Q : State visitations after the env changes'], 1) # + [markdown] deletable=false editable=false nbgrader={"checksum": "15aded4269ccf83bd3fa9b4ca9be353c", "grade": false, "grade_id": "cell-61bd2cfdba9cc49d", "locked": true, "schema_version": 1, "solution": false} # What do you observe? # # The state visitation map looks almost the same before and after the shortcut opens. This means that the Dyna-Q agent hasn't quite discovered and started exploiting the new shortcut. # # Now let's try increasing the exploration parameter $\epsilon$ to see if it helps the Dyna-Q agent discover the shortcut. # + deletable=false editable=false nbgrader={"checksum": "c97f95a59de3b9000e564c23dd1e8a6e", "grade": false, "grade_id": "cell-c9eab4ed4cf50870", "locked": true, "schema_version": 1, "solution": false} # Do not modify this cell! def run_experiment_only_cumulative_reward(env, agent, env_parameters, agent_parameters, exp_parameters): # Experiment settings num_runs = exp_parameters['num_runs'] num_max_steps = exp_parameters['num_max_steps'] epsilons = agent_parameters['epsilons'] env_info = {"change_at_n" : env_parameters["change_at_n"]} agent_info = {"num_states" : agent_parameters["num_states"], "num_actions" : agent_parameters["num_actions"], "planning_steps": agent_parameters["planning_steps"], "discount": env_parameters["discount"], "step_size" : agent_parameters["step_size"]} log_data = {'epsilons' : epsilons} cum_reward_all = np.zeros((len(epsilons), num_runs, num_max_steps)) for eps_idx, epsilon in enumerate(epsilons): print('Agent : Dyna-Q, epsilon : %f' % epsilon) os.system('sleep 1') # to prevent tqdm printing out-of-order before the above print() agent_info["epsilon"] = epsilon for run in tqdm(range(num_runs)): agent_info['random_seed'] = run agent_info['planning_random_seed'] = run rl_glue = RLGlue(env, agent) # Creates a new RLGlue experiment with the 
env and agent we chose above rl_glue.rl_init(agent_info, env_info) # We pass RLGlue what it needs to initialize the agent and environment num_steps = 0 cum_reward = 0 while num_steps < num_max_steps-1 : rl_glue.rl_start() # We start the experiment is_terminal = False while not is_terminal and num_steps < num_max_steps-1 : reward, _, action, is_terminal = rl_glue.rl_step() # The environment and agent take a step and return # the reward, and action taken. num_steps += 1 cum_reward += reward cum_reward_all[eps_idx][run][num_steps] = cum_reward log_data['cum_reward_all'] = cum_reward_all np.save("results/Dyna-Q_epsilons", log_data) # + # Do NOT modify the parameter settings. # Experiment parameters experiment_parameters = { "num_runs" : 30, # The number of times we run the experiment "num_max_steps" : 6000, # The number of steps per experiment } # Environment parameters environment_parameters = { "discount": 0.95, "change_at_n": 3000 } # Agent parameters agent_parameters = { "num_states" : 54, "num_actions" : 4, "step_size" : 0.125, "planning_steps" : 10, "epsilons": [0.1, 0.2, 0.4, 0.8] # The list of epsilons we want to try } current_env = ShortcutMazeEnvironment # The environment current_agent = DynaQAgent # The agent run_experiment_only_cumulative_reward(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters) plot_cumulative_reward('results/Dyna-Q_epsilons.npy', 'epsilons', 'cum_reward_all', 'Cumulative\nreward', r'$\epsilon$ = ', r'Dyna-Q : Varying $\epsilon$') # + [markdown] deletable=false editable=false nbgrader={"checksum": "7587c4a396d64d011ff5d8cf0f755f43", "grade": false, "grade_id": "cell-75b928a3930343ef", "locked": true, "schema_version": 1, "solution": false} # What do you observe? # # Increasing the exploration via the $\epsilon$-greedy strategy does not seem to be helping. In fact, the agent's cumulative reward decreases because it is spending more and more time trying out the exploratory actions. 
# # Can we do better...? # + [markdown] deletable=false editable=false nbgrader={"checksum": "fea0fa31931e764395600778cacbde8d", "grade": false, "grade_id": "cell-53c7b261289030c7", "locked": true, "schema_version": 1, "solution": false} # ## Section 2: Dyna-Q+ # + [markdown] deletable=false editable=false nbgrader={"checksum": "c201b6bae38cb088c3c3cbc51810c914", "grade": false, "grade_id": "cell-1ed17a58ff98db6f", "locked": true, "schema_version": 1, "solution": false} # The motivation behind Dyna-Q+ is to give a bonus reward for actions that haven't been tried for a long time, since there is a greater chance that the dynamics for that actions might have changed. # # In particular, if the modeled reward for a transition is $r$, and the transition has not been tried in $\tau(s,a)$ time steps, then planning updates are done as if that transition produced a reward of $r + \kappa \sqrt{ \tau(s,a)}$, for some small $\kappa$. # # Let's implement that! # # Based on your `DynaQAgent`, create a new class `DynaQPlusAgent` to implement the aforementioned exploration heuristic. Additionally : # 1. actions that had never been tried before from a state should now be allowed to be considered in the planning step, # 2. and the initial model for such actions is that they lead back to the same state with a reward of zero. # # At this point, you might want to refer to the video lectures and [Section 8.3](http://www.incompleteideas.net/book/RLbook2018.pdf#page=188) of the RL textbook for a refresher on Dyna-Q+. # + [markdown] deletable=false editable=false nbgrader={"checksum": "ebbfc778eab63bdff736bc478ebc5928", "grade": false, "grade_id": "cell-ede9dc6883c45c67", "locked": true, "schema_version": 1, "solution": false} # As usual, let's break this down in pieces and do it one-by-one. # # First of all, check out the `agent_init` method below. 
# In particular, pay attention to the attributes which are new to `DynaQPlusAgent`– state-visitation counts $\tau$ and the scaling parameter $\kappa$ – because you shall be using them later.

# + deletable=false editable=false nbgrader={"checksum": "53479b7ba60db3596b74755d1319b574", "grade": false, "grade_id": "cell-45b5c95ae385f669", "locked": true, "schema_version": 1, "solution": false}
# Do not modify this cell!

class DynaQPlusAgent(BaseAgent):

    def agent_init(self, agent_info):
        """Setup for the agent called when the experiment first starts.

        Args:
            agent_init_info (dict), the parameters used to initialize the agent. The dictionary contains:
            {
                num_states (int): The number of states,
                num_actions (int): The number of actions,
                epsilon (float): The parameter for epsilon-greedy exploration,
                step_size (float): The step-size,
                discount (float): The discount factor,
                planning_steps (int): The number of planning steps per environmental interaction
                kappa (float): The scaling factor for the reward bonus

                random_seed (int): the seed for the RNG used in epsilon-greedy
                planning_random_seed (int): the seed for the RNG used in the planner
            }
        """

        # First, we get the relevant information from agent_info 
        # Note: we use np.random.RandomState(seed) to set the two different RNGs
        # for the planner and the rest of the code
        try:
            self.num_states = agent_info["num_states"]
            self.num_actions = agent_info["num_actions"]
        except:
            # NOTE(review): bare except with print-and-continue means a missing key
            # still crashes later at np.zeros below — consider raising instead.
            print("You need to pass both 'num_states' and 'num_actions' \
in agent_info to initialize the action-value table")
        self.gamma = agent_info.get("discount", 0.95)
        self.step_size = agent_info.get("step_size", 0.1)
        self.epsilon = agent_info.get("epsilon", 0.1)
        self.planning_steps = agent_info.get("planning_steps", 10)
        # kappa scales the exploration bonus r + kappa * sqrt(tau) used in planning.
        self.kappa = agent_info.get("kappa", 0.001)

        self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))
        self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))

        # Next, we initialize the attributes required by the agent, e.g., q_values, model, tau, etc.
        # The visitation-counts can be stored as a table as well, like the action values 
        self.q_values = np.zeros((self.num_states, self.num_actions))
        # tau[s, a]: timesteps since (s, a) was last tried (drives the bonus reward).
        self.tau = np.zeros((self.num_states, self.num_actions))
        self.actions = list(range(self.num_actions))
        self.past_action = -1
        self.past_state = -1
        self.model = {}

# + [markdown] deletable=false editable=false nbgrader={"checksum": "ae86189d377dcbeb226bd8f01839be62", "grade": false, "grade_id": "cell-38c00ecba461bf92", "locked": true, "schema_version": 1, "solution": false}
# Now first up, implement the `update_model` method. Note that this is different from Dyna-Q in the aforementioned way. 
# 

# + deletable=false nbgrader={"checksum": "977dc282839bdb10b454c2ff698b58e6", "grade": false, "grade_id": "cell-b65a5bb0b37ceb84", "locked": false, "schema_version": 1, "solution": true}
# %%add_to DynaQPlusAgent

# [GRADED]

def update_model(self, past_state, past_action, state, reward):
    """updates the model 

    Args:
        past_state  (int): s
        past_action (int): a
        state       (int): s'
        reward      (int): r
    Returns:
        Nothing
    """

    # Recall that when adding a state-action to the model, if the agent is visiting the state
    # for the first time, then the remaining actions need to be added to the model as well
    # with zero reward and a transition into itself. Something like:
    ##   for action in self.actions:
    ##       if action != past_action:
    ##           self.model[past_state][action] = (past_state, 0)
    #
    # Note: do *not* update the visitation-counts here. We will do that in `agent_step`.
    #
    # (3 lines)
    if past_state not in self.model:
        # First visit to this state: record the real transition for the taken action...
        self.model[past_state] = {past_action : (state, reward)}
        ### START CODE HERE ###
        # ...and initialize every untried action as a zero-reward self-loop so the
        # planner is allowed to sample it later (Dyna-Q+ requirement).
        for action in self.actions:
            if action != past_action:
                self.model[past_state][action] = (past_state, 0)
        ### END CODE HERE ###
    else:
        self.model[past_state][past_action] = (state, reward)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "b319274d17d4f236253245cbafc2f2c9", "grade": false, "grade_id": "cell-817a09952176290c", "locked": true, "schema_version": 1, "solution": false}
# ### Test `update_model()`

# + deletable=false editable=false nbgrader={"checksum": "b51603111ba2020112cbdcc427c225b0", "grade": true, "grade_id": "DynaQPlus_update_model", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# Do not modify this cell!

## Test code for update_model() ##

actions = []

agent_info = {"num_actions": 4, 
              "num_states": 3, 
              "epsilon": 0.1, 
              "step_size": 0.1, 
              "discount": 1.0, 
              "random_seed": 0,
              "planning_random_seed": 0}
test_agent = DynaQPlusAgent()
test_agent.agent_init(agent_info)

test_agent.update_model(0,2,0,1)
test_agent.update_model(2,0,1,1)
test_agent.update_model(0,3,1,2)
test_agent.tau[0][0] += 1

print("Model: \n", test_agent.model)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "d5fac20afac86a10733c1eff19544eec", "grade": false, "grade_id": "cell-7d4bca62495646a5", "locked": true, "schema_version": 1, "solution": false}
# Expected output:
# ```
# Model: 
#  {0: {2: (0, 1), 0: (0, 0), 1: (0, 0), 3: (1, 2)}, 2: {0: (1, 1), 1: (2, 0), 2: (2, 0), 3: (2, 0)}}
# ```
# Note that the actions that were not taken from a state are also added to the model, with a loop back into the same state with a reward of 0.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "a7bc69509a63b874bea41e9e1dacddeb", "grade": false, "grade_id": "cell-d16ba2244f70cccc", "locked": true, "schema_version": 1, "solution": false}
# Next, you will implement the `planning_step()` method. 
# This will be very similar to the one you implemented in `DynaQAgent`, but here
# you will be adding the exploration bonus to the reward in the simulated transition.

# + deletable=false nbgrader={"checksum": "0527e2f3bdd38392222bcaab60b86473", "grade": false, "grade_id": "cell-850b98235b2087aa", "locked": false, "schema_version": 1, "solution": true}
# %%add_to DynaQPlusAgent
# [GRADED]

def planning_step(self):
    """performs planning, i.e. indirect RL.

    Args:
        None
    Returns:
        Nothing
    """
    # The indirect RL step:
    # - Choose a state and action from the set of experiences that are stored in the model. (~2 lines)
    # - Query the model with this state-action pair for the predicted next state and reward.(~1 line)
    # - **Add the bonus to the reward** (~1 line)
    # - Update the action values with this simulated experience.                            (2~4 lines)
    # - Repeat for the required number of planning steps.
    #
    # Note that the update equation is different for terminal and non-terminal transitions.
    # To differentiate between a terminal and a non-terminal next state, assume that the model stores
    # the terminal state as a dummy state like -1
    #
    # Important: remember you have a random number generator 'planning_rand_generator' as
    #     a part of the class which you need to use as self.planning_rand_generator.choice()
    #     For the sake of reproducibility and grading, *do not* use anything else like
    #     np.random.choice() for performing search control.
    # NOTE(review): the exact order of the two .choice() calls below determines the
    # RNG stream and therefore the graded expected output — do not reorder.
    ### START CODE HERE ###
    for i in range(self.planning_steps):
        # Search control: sample a previously-seen state, then one of its recorded actions.
        s = self.planning_rand_generator.choice(list(self.model.keys()))
        a = self.planning_rand_generator.choice(list(self.model[s].keys()))
        s_new, r = self.model[s][a]
        # Dyna-Q+ exploration bonus: kappa * sqrt(timesteps since (s, a) was last tried).
        if s_new != -1:
            target = r + self.kappa*np.sqrt(self.tau[s,a])+ self.gamma*np.max(self.q_values[s_new])
        else:
            # Terminal transition (dummy next state -1): no bootstrap term.
            target = r + self.kappa*np.sqrt(self.tau[s,a])
        self.q_values[s, a] = self.q_values[s,a] + self.step_size*(target - self.q_values[s, a])
    ### END CODE HERE ###

# + [markdown] deletable=false editable=false nbgrader={"checksum": "d62d53a3b16a0a7fa4842f5775c442c2", "grade": false, "grade_id": "cell-f03c6dd8052fd06c", "locked": true, "schema_version": 1, "solution": false}
# ### Test `planning_step()`

# + deletable=false editable=false nbgrader={"checksum": "cdcaeef389cd61190d8c59f094144fbf", "grade": true, "grade_id": "DynaQPlus_planning_step", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# Do not modify this cell!
## Test code for planning_step() ##

actions = []
agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "kappa": 0.001, "planning_steps": 4, "random_seed": 0, "planning_random_seed": 1}
test_agent = DynaQPlusAgent()
test_agent.agent_init(agent_info)

test_agent.update_model(0,1,-1,1)
test_agent.tau += 1; test_agent.tau[0][1] = 0
test_agent.update_model(0,2,1,1)
test_agent.tau += 1; test_agent.tau[0][2] = 0    # Note that these counts are manually updated
test_agent.update_model(2,0,1,1)                 # as we'll code them in `agent_step'
test_agent.tau += 1; test_agent.tau[2][0] = 0    # which hasn't been implemented yet.
test_agent.planning_step()

print("Model: \n", test_agent.model)
print("Action-value estimates: \n", test_agent.q_values)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "bc713c424acb265ebef08b7c5d2321e8", "grade": false, "grade_id": "cell-c624d442e2ae7d30", "locked": true, "schema_version": 1, "solution": false}
# Expected output:
# ```
# Model:
#  {0: {1: (-1, 1), 0: (0, 0), 2: (1, 1), 3: (0, 0)}, 2: {0: (1, 1), 1: (2, 0), 2: (2, 0), 3: (2, 0)}}
# Action-value estimates:
# [[0.         0.10014142 0.         0.        ]
#  [0.         0.         0.         0.        ]
#  [0.         0.00036373 0.         0.00017321]]
#
# ```

# + [markdown] deletable=false editable=false nbgrader={"checksum": "45628d99c845d3b4f280c006e697799d", "grade": false, "grade_id": "cell-92d49553185d7c50", "locked": true, "schema_version": 1, "solution": false}
# Again, before you move on to implement the rest of the agent methods, here are the couple of helper functions that you've used in the previous assessments for choosing an action using an $\epsilon$-greedy policy.

# + deletable=false editable=false nbgrader={"checksum": "4ae83584fd55c126c09c1c43173dcbaf", "grade": false, "grade_id": "cell-c31cab304f2230ae", "locked": true, "schema_version": 1, "solution": false}
# %%add_to DynaQPlusAgent
# Do not modify this cell!

def argmax(self, q_values):
    """argmax with random tie-breaking

    Args:
        q_values (Numpy array): the array of action values
    Returns:
        action (int): an action with the highest value
    """
    top = float("-inf")
    ties = []

    for i in range(len(q_values)):
        # Ties are collected and broken uniformly at random (via rand_generator)
        # rather than always picking the first maximiser.
        if q_values[i] > top:
            top = q_values[i]
            ties = []

        if q_values[i] == top:
            ties.append(i)

    return self.rand_generator.choice(ties)

def choose_action_egreedy(self, state):
    """returns an action using an epsilon-greedy policy w.r.t. the current action-value function.

    Important: assume you have a random number generator 'rand_generator' as a part of the class
                which you can use as self.rand_generator.choice() or self.rand_generator.rand()

    Args:
        state (List): coordinates of the agent (two elements)
    Returns:
        The action taken w.r.t. the aforementioned epsilon-greedy policy
    """
    # With probability epsilon explore uniformly; otherwise act greedily.
    if self.rand_generator.rand() < self.epsilon:
        action = self.rand_generator.choice(self.actions)
    else:
        values = self.q_values[state]
        action = self.argmax(values)

    return action

# + [markdown] deletable=false editable=false nbgrader={"checksum": "ff01d8dc7e39cfc84ff1e0799736d5a3", "grade": false, "grade_id": "cell-2af006f875c70cf7", "locked": true, "schema_version": 1, "solution": false}
# Now implement the rest of the agent-related methods, namely `agent_start`, `agent_step`, and `agent_end`. Again, these will be very similar to the ones in the `DynaQAgent`, but you will have to think of a way to update the counts since the last visit.

# + deletable=false nbgrader={"checksum": "2dff5e0b57e00801566b5b83956a18d6", "grade": false, "grade_id": "cell-34cb21ba9a8f931c", "locked": false, "schema_version": 1, "solution": true}
# %%add_to DynaQPlusAgent
# [GRADED]

def agent_start(self, state):
    """The first method called when the experiment starts,
    called after the environment starts.

    Args:
        state (Numpy array): the state from the environment's env_start function.
    Returns:
        (int) The first action the agent takes.
    """
    # given the state, select the action using self.choose_action_egreedy(),
    # and save current state and action (~2 lines)
    ### self.past_state = ?
    ### self.past_action = ?
    # Note that the last-visit counts are not updated here.
    ### START CODE HERE ###
    self.past_action = self.choose_action_egreedy(state)
    self.past_state = state
    ### END CODE HERE ###

    return self.past_action

def agent_step(self, reward, state):
    """A step taken by the agent.

    Args:
        reward (float): the reward received for taking the last action taken
        state (Numpy array): the state from the environment's step based on
                             where the agent ended up after the last step
    Returns:
        (int) The action the agent is taking.
    """
    # Update the last-visited counts (~2 lines)
    # - Direct-RL step (1~3 lines)
    # - Model Update step (~1 line)
    # - `planning_step` (~1 line)
    # - Action Selection step (~1 line)
    # Save the current state and action before returning the action to be performed. (~2 lines)
    ### START CODE HERE ###
    # update the tau: everything ages by 1, the pair just taken resets to 0
    self.tau += 1
    s, a = self.past_state, self.past_action
    self.tau[s, a] = 0
    # Direct RL: one-step Q-learning update from the real transition
    target = reward + self.gamma*np.max(self.q_values[state])
    self.q_values[s, a] = self.q_values[s,a] + self.step_size*(target - self.q_values[s, a])
    # Model learning, then indirect RL (planning)
    self.update_model(s,a, state, reward)
    self.planning_step()
    action = self.choose_action_egreedy(state)
    self.past_state = state
    self.past_action = action
    ### END CODE HERE ###

    return self.past_action

def agent_end(self, reward):
    """Called when the agent terminates.

    Args:
        reward (float): the reward the agent received for entering the
            terminal state.
    """
    # Again, add the same components you added in agent_step to augment Dyna-Q into Dyna-Q+
    ### START CODE HERE ###
    self.tau += 1
    s,a = self.past_state, self.past_action
    self.tau[s, a] = 0
    # Terminal update: no bootstrap term, and the model records the terminal
    # next state as the dummy state -1.
    target = reward
    self.q_values[s, a] = self.q_values[s,a] + self.step_size*(target - self.q_values[s, a])
    self.update_model(s,a,-1,reward)
    self.planning_step()
    ### END CODE HERE ###

# + [markdown] deletable=false editable=false nbgrader={"checksum": "0d0186afc16559c8d05fff29e5b91b50", "grade": false, "grade_id": "cell-da231fa8a614788e", "locked": true, "schema_version": 1, "solution": false}
# Let's test these methods one-by-one.
# + [markdown] deletable=false editable=false nbgrader={"checksum": "723356966ffc5fbeb16a6cd981071bbe", "grade": false, "grade_id": "cell-8db85fa89415ea0e", "locked": true, "schema_version": 1, "solution": false} # ### Test `agent_start()` # + deletable=false editable=false nbgrader={"checksum": "703e7137c1d55bd3649a2cabe18f6738", "grade": true, "grade_id": "DynaPlus_agent_start", "locked": true, "points": 5, "schema_version": 1, "solution": false} # Do not modify this cell! ## Test code for agent_start() ## agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "kappa": 0.001, "random_seed": 0, "planning_random_seed": 0} test_agent = DynaQPlusAgent() test_agent.agent_init(agent_info) action = test_agent.agent_start(0) # state print("Action:", action) print("Timesteps since last visit: \n", test_agent.tau) print("Action-value estimates: \n", test_agent.q_values) print("Model: \n", test_agent.model) # + [markdown] deletable=false editable=false nbgrader={"checksum": "ea8c3a78e11dd96c73d1d02933e0ec64", "grade": false, "grade_id": "cell-f6fb327707c1855c", "locked": true, "schema_version": 1, "solution": false} # Expected output: # ``` # Action: 1 # Timesteps since last visit: # [[0. 0. 0. 0.] # [0. 0. 0. 0.] # [0. 0. 0. 0.]] # Action-value estimates: # [[0. 0. 0. 0.] # [0. 0. 0. 0.] # [0. 0. 0. 0.]] # Model: # {} # ``` # Remember the last-visit counts are not updated in `agent_start()`. # + [markdown] deletable=false editable=false nbgrader={"checksum": "28714342c4f7fec008e01c525e98d2a6", "grade": false, "grade_id": "cell-be8fc718581879ad", "locked": true, "schema_version": 1, "solution": false} # ### Test `agent_step()` # + deletable=false editable=false nbgrader={"checksum": "1ebd676152a198dc5e4e009198c36aee", "grade": true, "grade_id": "DynaQPlus_agent_step", "locked": true, "points": 5, "schema_version": 1, "solution": false} # Do not modify this cell! 
## Test code for agent_step() ## agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "kappa": 0.001, "planning_steps": 4, "random_seed": 0, "planning_random_seed": 0} test_agent = DynaQPlusAgent() test_agent.agent_init(agent_info) actions = [] actions.append(test_agent.agent_start(0)) # state actions.append(test_agent.agent_step(1,2)) # (reward, state) actions.append(test_agent.agent_step(0,1)) # (reward, state) print("Actions:", actions) print("Timesteps since last visit: \n", test_agent.tau) print("Action-value estimates: \n", test_agent.q_values) print("Model: \n", test_agent.model) # + [markdown] deletable=false editable=false nbgrader={"checksum": "b7f7f472dbae4f04ab045f9070443158", "grade": false, "grade_id": "cell-6cd0bcf30529fcca", "locked": true, "schema_version": 1, "solution": false} # Expected output: # ``` # Actions: [1, 3, 1] # Timesteps since last visit: # [[2. 1. 2. 2.] # [2. 2. 2. 2.] # [2. 2. 2. 0.]] # Action-value estimates: # [[1.91000000e-02 2.71000000e-01 0.00000000e+00 1.91000000e-02] # [0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00] # [0.00000000e+00 1.83847763e-04 4.24264069e-04 0.00000000e+00]] # Model: # {0: {1: (2, 1), 0: (0, 0), 2: (0, 0), 3: (0, 0)}, 2: {3: (1, 0), 0: (2, 0), 1: (2, 0), 2: (2, 0)}} # ``` # + [markdown] deletable=false editable=false nbgrader={"checksum": "069707871b3203dcaf81c9081726455b", "grade": false, "grade_id": "cell-ffbeb161866707da", "locked": true, "schema_version": 1, "solution": false} # ### Test `agent_end()` # + deletable=false editable=false nbgrader={"checksum": "cfdf00092d97d6e9ac98447202a6d292", "grade": true, "grade_id": "DynaQPlus_agent_end", "locked": true, "points": 5, "schema_version": 1, "solution": false} # Do not modify this cell! 
## Test code for agent_end() ## agent_info = {"num_actions": 4, "num_states": 3, "epsilon": 0.1, "step_size": 0.1, "discount": 1.0, "kappa": 0.001, "planning_steps": 4, "random_seed": 0, "planning_random_seed": 0} test_agent = DynaQPlusAgent() test_agent.agent_init(agent_info) actions = [] actions.append(test_agent.agent_start(0)) actions.append(test_agent.agent_step(1,2)) actions.append(test_agent.agent_step(0,1)) test_agent.agent_end(1) print("Actions:", actions) print("Timesteps since last visit: \n", test_agent.tau) print("Action-value estimates: \n", test_agent.q_values) print("Model: \n", test_agent.model) # + [markdown] deletable=false editable=false nbgrader={"checksum": "c008b44213589da78abaf90a07ebb93e", "grade": false, "grade_id": "cell-e4831f4d1cf10b12", "locked": true, "schema_version": 1, "solution": false} # Expected output: # ``` # Actions: [1, 3, 1] # Timesteps since last visit: # [[3. 2. 3. 3.] # [3. 0. 3. 3.] # [3. 3. 3. 1.]] # Action-value estimates: # [[1.91000000e-02 3.44083848e-01 0.00000000e+00 4.44632051e-02] # [1.91732051e-02 1.90000000e-01 0.00000000e+00 0.00000000e+00] # [0.00000000e+00 1.83847763e-04 4.24264069e-04 0.00000000e+00]] # Model: # {0: {1: (2, 1), 0: (0, 0), 2: (0, 0), 3: (0, 0)}, 2: {3: (1, 0), 0: (2, 0), 1: (2, 0), 2: (2, 0)}, 1: {1: (-1, 1), 0: (1, 0), 2: (1, 0), 3: (1, 0)}} # ``` # + [markdown] deletable=false editable=false nbgrader={"checksum": "334f1ceca4ab090a3e1ec653e3b496c2", "grade": false, "grade_id": "cell-839b7e5d8b7c439f", "locked": true, "schema_version": 1, "solution": false} # ### Experiment: Dyna-Q+ agent in the _changing_ environment # # Okay, now we're ready to test our Dyna-Q+ agent on the Shortcut Maze. As usual, we will average the results over 30 independent runs of the experiment. # + # Do NOT modify the parameter settings. 
# Experiment parameters experiment_parameters = { "num_runs" : 30, # The number of times we run the experiment "num_max_steps" : 6000, # The number of steps per experiment } # Environment parameters environment_parameters = { "discount": 0.95, "change_at_n": 3000 } # Agent parameters agent_parameters = { "num_states" : 54, "num_actions" : 4, "epsilon": 0.1, "step_size" : 0.5, "planning_steps" : [50] } current_env = ShortcutMazeEnvironment # The environment current_agent = DynaQPlusAgent # The agent run_experiment_with_state_visitations(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters, "Dyna-Q+") shutil.make_archive('results', 'zip', 'results'); # + [markdown] deletable=false editable=false nbgrader={"checksum": "0aae91605b0be56ef43e821b1719bfbf", "grade": false, "grade_id": "cell-d1f8fd21d4357f1a", "locked": true, "schema_version": 1, "solution": false} # Let's compare the Dyna-Q and Dyna-Q+ agents with `planning_steps=50` each. # + deletable=false editable=false nbgrader={"checksum": "d35e2a4111ca0d5f4baf83885e537139", "grade": false, "grade_id": "cell-ceee2185289f571c", "locked": true, "schema_version": 1, "solution": false} # Do not modify this cell! def plot_cumulative_reward_comparison(file_name_dynaq, file_name_dynaqplus): cum_reward_q = np.load(file_name_dynaq).item()['cum_reward_all'][2] cum_reward_qPlus = np.load(file_name_dynaqplus).item()['cum_reward_all'][0] plt.plot(np.mean(cum_reward_qPlus, axis=0), label='Dyna-Q+') plt.plot(np.mean(cum_reward_q, axis=0), label='Dyna-Q') plt.axvline(x=3000, linestyle='--', color='grey', alpha=0.4) plt.xlabel('Timesteps') plt.ylabel('Cumulative\nreward', rotation=0, labelpad=60) plt.legend(loc='upper left') plt.title('Average performance of Dyna-Q and Dyna-Q+ agents in the Shortcut Maze\n') plt.show() # + # Do not modify this cell! 
# Compare cumulative reward curves of the two agents (planning_steps=50 each).
plot_cumulative_reward_comparison('results/Dyna-Q_shortcut_steps.npy', 'results/Dyna-Q+.npy')

# + [markdown] deletable=false editable=false nbgrader={"checksum": "21076543e2048b8680e6e0aef7fe3eb8", "grade": false, "grade_id": "cell-1754be7eabd4a8aa", "locked": true, "schema_version": 1, "solution": false}
# What do you observe? (For reference, your graph should look like [Figure 8.5 in Chapter 8](http://www.incompleteideas.net/book/RLbook2018.pdf#page=189) of the RL textbook)
#
# The slope of the curve increases for the Dyna-Q+ curve shortly after the shortcut opens up after 3000 steps, which indicates that the rate of receiving the positive reward increases. This implies that the Dyna-Q+ agent finds the shorter path to the goal.
#
# To verify this, let us plot the state-visitations of the Dyna-Q+ agent before and after the shortcut opens up.

# +
# Do not modify this cell!
# Visualize where the Dyna-Q+ agent spends its time before/after the change.
plot_state_visitations("results/Dyna-Q+.npy", ['Dyna-Q+ : State visitations before the env changes', 'Dyna-Q+ : State visitations after the env changes'], 0)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "449ce5338414248561fc1c8219cc3792", "grade": false, "grade_id": "cell-fafb7f5a25d136fb", "locked": true, "schema_version": 1, "solution": false}
# What do you observe?
#
# Before the shortcut opens up, like Dyna-Q, the Dyna-Q+ agent finds the sole, long path to the goal. But because the Dyna-Q+ agent keeps exploring, it succeeds in discovering the shortcut once it opens up, which leads to the goal faster. So the bonus reward heuristic is effective in helping the agent explore and find changes in the environment without degrading the performance.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "29cd9402feeb69c6d64d8ae08c9aa201", "grade": false, "grade_id": "cell-89485f6ff67b4a48", "locked": true, "schema_version": 1, "solution": false}
# ## Wrapping Up
#
# Congratulations! You have:
#
# 1. implemented Dyna-Q, a model-based approach to RL;
# 2. implemented Dyna-Q+, a variant of Dyna-Q with an exploration bonus that encourages exploration;
# 3. conducted scientific experiments to empirically validate the exploration/exploitation dilemma in the planning context on an environment that changes with time.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "bdccc04273c6aadc27991c28fb2065e9", "grade": false, "grade_id": "cell-5ff77de3e5722637", "locked": true, "schema_version": 1, "solution": false}
# Some points to ponder about:
# 1. At what cost does Dyna-Q+ improve over Dyna-Q?
# 2. In general, what is the trade-off of using model-based methods like Dyna-Q over model-free methods like Q-learning?
#
#
# ---
#
# **Note**: Apart from using the 'Submit' button in the notebook, you have to submit an additional `zip` file containing the 'npy' files that were generated from running the experiment cells. To do so:
#
# 1. Generate the zip file by running the experiment cells in the notebook. On the top of the notebook, navigate to 'File->Open' to open the directory view of this assignment. Select "results.zip" and click on "Download". Alternatively, you can download just the results folder and run "zip -jr results.zip results/" (_The flag 'j' is required by the grader!_).
# 2. Go to the "My submission" tab and click on "+ Create submission".
# 3. Click on "C2M4 Data-file Grader" and upload your `results.zip`.
#
# **This accounts for 50% of the marks, so don't forget to do so!**
C2-Sample_based/Dyna_Q&Dyna_Q+/Planning_Assignment-v1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Understanding how to evaluate object-detection results
#
# 1. Compute the precision and the confidence-threshold value for a given recall.<br/>
#    + Using the computed threshold, display the detection results. (based on practice 1.ipynb) <br/>
# 2. Compute the precision and recall at a given confidence threshold.

# +
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
import torch.utils.data as data
from utils import Timer
from ssd import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
# # %matplotlib inline
from matplotlib import pyplot as plt
import xml.etree.ElementTree as ET

# PASCAL VOC class names in canonical order (class index = position here).
labelmap = (  # always index 0
    'aeroplane', 'bicycle', 'bird', 'boat',
    'bottle', 'bus', 'car', 'cat', 'chair',
    'cow', 'diningtable', 'dog', 'horse',
    'motorbike', 'person', 'pottedplant',
    'sheep', 'sofa', 'train', 'tvmonitor')

# '%s' / '{:s}' templates later filled with the 6-digit image id.
annopath = os.path.join(VOC_ROOT, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(VOC_ROOT, 'VOC2007', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(VOC_ROOT, 'VOC2007', 'ImageSets', 'Main', '{:s}.txt')
YEAR = '2007'
# Per-channel BGR mean subtracted by BaseTransform before inference.
dataset_mean = (104, 117, 123)

if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
# -

# ## Load the SSD model and the PASCAL VOC2007 dataset

# +
### load net
num_classes = len(labelmap) + 1                      # +1 for background
net = build_ssd('vgg16', 300, 21)            # initialize SSD
# checkpoint = 'weights/ssd300_before_optimize.pth'
# checkpoint = 'weights/ssd300_epoch_001.pth'
checkpoint = 'weights/ssd300_mAP_77.43_v2.pth'
net.load_weights(checkpoint)
net.eval()
print('Finished loading model!')

### load data
set_type = 'test'
dataset = VOCDetection(VOC_ROOT, [('2007', set_type)],
                       BaseTransform(300, dataset_mean),
                       VOCAnnotationTransform())
if torch.cuda.is_available():
    net = net.cuda()
    cudnn.benchmark = True
# -

# ## Run object detection
# As in Practice 1, run detection on every image belonging to the test set.

# +
save_folder = os.path.basename(checkpoint)

from layers import Detect
num_images = len(dataset)
# Detect(num_classes, bkg_label, top_k, conf_thresh, nms_thresh)
parser = Detect(num_classes, 0, 200, 0.1, 0.45)
softmax = nn.Softmax(dim=-1)

# all detections are collected into:
#    all_boxes[cls] = N x 6 array of detections in
#    (x1, y1, x2, y2, score, ids)
all_boxes = [np.zeros((0,6)) for _ in range(len(labelmap)+1)]

# timers
_t = {'forward': Timer(), 'misc': Timer()}
output_dir = os.path.join(save_folder, set_type)
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
det_file = os.path.join(output_dir, 'detections.pkl')

# Detections are cached on disk: only run the (slow) network pass once.
if not os.path.exists(det_file):
    for i in range(num_images):
        im, gt, h, w, ids = dataset.pull_item_with_index(i)

        x = im.unsqueeze(0)
        if torch.cuda.is_available():
            x = x.cuda()

        _t['forward'].tic()
        with torch.no_grad():
            loc_pred, cls_pred, priors = net(x)
        forward_time = _t['forward'].toc(average=True)

        _t['misc'].tic()
        detections = parser(loc_pred, softmax(cls_pred), priors.type(type(x)))

        # skip j = 0, because it's the background class
        for j in range(1, detections.size(1)):
            dets = detections[0, j, :]   # detections for j-th target class
            # Keep only rows with a positive confidence score (column 0).
            mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
            dets = torch.masked_select(dets, mask).view(-1, 5)
            if dets.shape[0] == 0:
                continue
            boxes = dets[:, 1:]
            boxes[:, 0] *= w    # because, box coordinates were normalized to [0,1]
            boxes[:, 2] *= w
            boxes[:, 1] *= h
            boxes[:, 3] *= h
            scores = dets[:, 0].cpu().numpy()
            ids_dets = ids*np.ones( (len(scores),1) )   # store image index for evaluation
            cls_dets = np.hstack(    # cls_dets: M x 6 matrix [[x,y,w,h,score,img_idx]]
                       (boxes.cpu().numpy(),\
                        scores[:, np.newaxis], \
                        ids_dets )).astype(np.float32, copy=False)
            all_boxes[j] = np.vstack( (all_boxes[j], cls_dets) )
        misc_time = _t['misc'].toc(average=True)

        if i % 100 == 0:
            print('[im_detect: {:d}/{:d}] forward: {:.3f}s, misc: {:.3f}s'.format(i+1, num_images, forward_time, misc_time))

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Saved to {:}.'.format(det_file))
else:
    with open(det_file, 'rb') as f:
        all_boxes = pickle.load(f)
    print('Load from {:}.'.format(det_file))
# -

# ## Obtaining the ground-truth data
# A function for loading the bounding-box annotations provided with the PASCAL VOC2007 dataset.

def parse_rec(filename):
    """ Parse a PASCAL VOC xml file

    Returns a list of dicts, one per annotated object, with keys
    'name', 'pose', 'truncated', 'difficult' and 'bbox'
    ([xmin, ymin, xmax, ymax], converted from VOC's 1-based pixel
    coordinates to 0-based by subtracting 1).
    """
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        obj_struct = {}
        obj_struct['name'] = obj.find('name').text
        obj_struct['pose'] = obj.find('pose').text
        obj_struct['truncated'] = int(obj.find('truncated').text)
        obj_struct['difficult'] = int(obj.find('difficult').text)
        bbox = obj.find('bndbox')
        obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
                              int(bbox.find('ymin').text) - 1,
                              int(bbox.find('xmax').text) - 1,
                              int(bbox.find('ymax').text) - 1]
        objects.append(obj_struct)

    return objects

# ## Detection performance evaluation function
# Evaluates performance for a given class (classname).
# It loads the ground truth via parse_rec and, if there are detections (det_boxes),
# evaluates them as follows:
#
# 0. Sort the detections by confidence score.
# 1. Starting from the highest-confidence detection, compute the IoU against the
#    ground-truth (GT) boxes of the image the detection came from.
# 2. If the largest IoU is at or above the threshold mark it TP, otherwise FP, etc.
# 3. From the accumulated TP and FP, together with npos (TP+FN), compute precision and recall.
#
# + [TODO] Modify the cell below as needed to compute the precision and the confidence
#   score corresponding to the given_recall values supplied in a later cell.
#   Hint: to get a particular result out of a function you must set its return values.
#   1) Read the given code carefully and think about which values need to be returned.
#   2) Look up the Python syntax needed to set a function's return values.
def voc_eval(det_boxes, annopath, imagesetfile, classname, cachedir, ovthresh=0.5):
    """Evaluate PASCAL VOC detection performance for one class.

    Parameters
    ----------
    det_boxes : ndarray, shape (N, 6)
        Detections as rows of (x1, y1, x2, y2, score, image_index).
    annopath : str
        printf-style template mapping an image name to its annotation XML path.
    imagesetfile : str
        Text file listing one image name per line.
    classname : str
        Class to evaluate.
    cachedir : str
        Directory used to cache parsed annotations ('annots.pkl').
    ovthresh : float, optional
        IoU threshold for a detection to count as a true positive.

    Returns
    -------
    rec, prec : ndarray (or -1. when there are no detections)
        Cumulative recall / precision curves, sorted by descending confidence.
    ap : float
        11-point interpolated average precision (-1. when there are no
        detections).
    scores : ndarray
        Detection confidences in descending order, aligned index-wise with
        ``rec``/``prec`` (empty when there are no detections).
    """
    # --- load ground truth (parsed once, then cached on disk) ---
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        # parse every annotation XML and cache the result
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath % (imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # load previously cached annotations
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)

    # --- extract GT objects for this class ---
    class_recs = {}
    npos = 0  # number of non-difficult GT boxes (TP + FN denominator)
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # BUGFIX: np.bool was removed in NumPy >= 1.24; use the builtin bool.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)  # per-GT "already matched" flags
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # BUGFIX: sorted_scores must exist on every path — the original only
    # assigned it inside the if-branch, so an empty det_boxes raised a
    # NameError at the final return.
    sorted_scores = np.array([])

    if len(det_boxes):
        BB = det_boxes[:, :4]
        confidence = det_boxes[:, 4]
        image_ids = det_boxes[:, 5]

        # sort detections by descending confidence (negate for np.sort/argsort)
        sorted_ind = np.argsort(-confidence)
        sorted_scores = np.sort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]

        # go down dets and mark TPs and FPs
        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in range(nd):
            R = class_recs['%06d' % int(image_ids[d])]
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)
            if BBGT.size > 0:
                # vectorized IoU of this detection against all GTs of the image
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin, 0.)
                ih = np.maximum(iymax - iymin, 0.)
                inters = iw * ih
                uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
                       (BBGT[:, 2] - BBGT[:, 0]) *
                       (BBGT[:, 3] - BBGT[:, 1]) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)

            if ovmax > ovthresh:
                if not R['difficult'][jmax]:  # matched a difficult GT -> ignore
                    if not R['det'][jmax]:  # first match of this GT -> TP
                        tp[d] = 1.
                        R['det'][jmax] = 1
                    else:  # duplicate detection of an already-matched GT
                        fp[d] = 1.
            else:
                fp[d] = 1.  # no GT overlaps enough -> false positive

        # compute precision/recall from cumulative TP/FP counts
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)

        # 11-point interpolated AP (VOC2007 metric)
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):  # recall thresholds
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        rec = -1.
        prec = -1.
        ap = -1.

    # scores were negated for the descending sort; negate back to positive
    return rec, prec, ap, -sorted_scores


# ## Running the evaluation
# Evaluate detection performance with voc_eval above and draw the PR curve.
#
# + [TODO] Modify or add the parts needed to compute the precision and
# confidence score that correspond to the given recall value.
# Hint: numpy was imported in cell [1].
#  1) Look up the numpy function that finds indices of array elements
#     satisfying a condition (search example: numpy find conditional index).
#  2) Think about how to use the indices found in 1).
# +
given_recall = 0.1

print('Evaluating detections')
print('Given recall: {}'.format(given_recall))

cachedir = os.path.join(save_folder, 'annotations_cache')
aps = []
for i, cls in enumerate(labelmap):
    print('#' * 30)
    # BUGFIX: voc_eval returns 4 values (rec, prec, ap, scores) — the original
    # unpacked only 3, which raised ValueError. `scores` holds the detection
    # confidences in descending order, aligned index-wise with rec/prec.
    rec, prec, ap, scores = voc_eval(all_boxes[i+1], annopath,
                                     imgsetpath.format(set_type), cls, cachedir,
                                     ovthresh=0.5)
    aps += [ap]
    print('AP for {} = {:.4f}'.format(cls, ap))

    # precision / confidence at the first point where recall >= given_recall
    corresponding_prec = None
    corresponding_score = None
    if np.ndim(rec) > 0:  # rec is the scalar -1. when there were no detections
        hit = np.where(rec >= given_recall)[0]
        if len(hit):
            corresponding_prec = prec[hit[0]]
            corresponding_score = scores[hit[0]]
            print('Corresponding precision: {:.4f}, confidence score: {:.4f}'.format(corresponding_prec, corresponding_score))

    # draw PR curve
    fig = plt.figure()
    ax = fig.gca()
    ax.set_xticks(np.arange(0, 1, 0.1))
    ax.set_yticks(np.arange(0, 1., 0.1))
    ax.plot(rec, prec)
    plt.grid()
    plt.title('Precision-recall curve: {:s}'.format(cls))
    if corresponding_prec is not None:
        plt.plot(given_recall, corresponding_prec, color='red', marker='*', markersize=10)
    plt.show()
    print('')

print('Mean AP = {:.4f}'.format(np.mean(aps)))

# +
given_threshold = 0.8

print('Evaluating detections')
# BUGFIX: the original printed given_recall here instead of given_threshold.
print('Given threshold: {}'.format(given_threshold))

cachedir = os.path.join(save_folder, 'annotations_cache')
aps = []
for i, cls in enumerate(labelmap):
    print('#' * 30)
    rec, prec, ap, scores = voc_eval(all_boxes[i+1], annopath,
                                     imgsetpath.format(set_type), cls, cachedir,
                                     ovthresh=0.5)
    aps += [ap]
    print('AP for {} = {:.4f}'.format(cls, ap))

    # precision / recall of the operating point at the given confidence
    # threshold: scores are sorted descending, so the last index with
    # score >= threshold marks all detections that would be kept.
    corresponding_prec = None
    corresponding_rec = None
    if np.ndim(rec) > 0:
        hit = np.where(scores >= given_threshold)[0]
        if len(hit):
            corresponding_prec = prec[hit[-1]]
            corresponding_rec = rec[hit[-1]]
            print('Corresponding precision: {:.4f}, recall: {:.4f}'.format(corresponding_prec, corresponding_rec))

    # draw PR curve
    fig = plt.figure()
    ax = fig.gca()
    ax.set_xticks(np.arange(0, 1, 0.1))
    ax.set_yticks(np.arange(0, 1., 0.1))
    ax.plot(rec, prec)
    plt.grid()
    plt.title('Precision-recall curve: {:s}'.format(cls))
    if corresponding_prec is not None:
        plt.plot(corresponding_rec, corresponding_prec, color='red', marker='*', markersize=10)
    plt.show()
    print('')

print('Mean AP = {:.4f}'.format(np.mean(aps)))
hands-on-experience/practice2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # 1806554 <NAME> Lab 2

# Exercise 1: read a name and an age from stdin and echo them back.
name <- as.character(readline(prompt = "name"))
age <- as.integer(readline(prompt = "age"))
cat("Name : ", name,"\n","Age : ", age)

# Exercise 2: read roll number, name and branch, then print them.
roll <- as.integer(readline(prompt = "roll no"))
name <- as.character(readline(prompt = "name"))
branch <- as.character(readline(prompt = "branch"))
cat("roll->",roll,"\n","name->",name,"\n","branch->",branch)

# Exercise 3: read two space-separated integers on one line and print their
# sum, product, quotient and difference.
x <- readline()
x <- strsplit(x," ")
x <- as.integer(x[[1]])
cat("sum : ",x[1]+x[2],"\t","mul : ",x[1]*x[2],"\t","div : ",x[1]/x[2],"\t","sub : ",x[1]-x[2])

# Exercise 4: read a 3-digit number and print the sum of its digits
# (splitting on "" yields the individual characters).
x <- as.character(readline())
x <- strsplit(x,"")
x <- as.integer(x[[1]])
cat("sum of 3 digits of number : ",x[1]+x[2]+x[3])

# Exercise 5: area and circumference of a circle with radius x
# (pi approximated as 3.14).
x <- as.integer(readline())
area = 3.14*x*x
circumference = 2*3.14*x
cat("area : ",area,"\n","circumference : ",circumference)

# Exercise 6: integer sequence, mean and sum over fixed ranges.
cat("seq bet 20 to 50 : ",c(20:50))
cat("\n")
cat("mean bet 20 to 60 : ",mean(20:60))
cat("\n")
cat("sum for 51 to 91 : ",sum(51:91))

# Exercise 7: ten random integers drawn uniformly from [-50, 50].
round(runif(10,-50,50))

# Exercise 8: read space-separated integers and print their max and min.
x <- readline()
x <- strsplit(x," ")
x <- as.integer(x[[1]])
max(x)
min(x)

# Exercise 9: vectors of three different types and their classes.
x <- c(1,2,3,4,5)
y <- c("hey","how","are","you","bruh")
z <- x > 1  # logical vector from an element-wise comparison
class(x)
cat(x,"\n")
class(y)
cat(y,"\n")
class(z)
cat(z)

# Exercise 10: sum, mean and product of user-supplied integers.
x <- readline()
x <- strsplit(x," ")
x <- as.integer(x[[1]])
cat("sum : ",sum(x),"\n","mean : ",mean(x),"\n","product : ",prod(x))

# Exercise 11: build and print increasing, decreasing and combined sequences.
tmp <- c(1:20)
print(tmp)
tmp <- c(20:1)
print(tmp)
tmp <- c(1:20,20:1)
print(tmp)
tmp <- c(4,6,3)
print(tmp)
DA Lab/2_assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Feature-engineering notebook (Python 2): group-by mean trip_duration per
# (vendor_id, weekday) and per (vendor_id, hour) for the taxi data set.

# +
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)  # make the project package importable

import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
color = sns.color_palette()
# %matplotlib inline
matplotlib.style.use('ggplot')
plt.rcParams['figure.figsize'] = [12, 6]

from datetime import timedelta
import datetime as dt

from IPython.display import display
# remove warnings
import warnings
warnings.filterwarnings('ignore')
# -

# NOTE(review): data_utils is provided by the project package on module_path;
# it is not imported in the visible cells — confirm the notebook environment
# supplies it.
train, test = data_utils.load_dataset(op_scope='4')
print 'train: {}, test: {}'.format(train.shape, test.shape)

# trip_durations = train['trip_duration']
# del train['trip_duration']
# conbined_data = pd.concat([train, test])

# +
# mean trip duration per (vendor_id, pickup_weekday), one curve per vendor
summary_wdays_avg_duration = pd.DataFrame(train.groupby(['vendor_id','pickup_weekday'])['trip_duration'].mean())
summary_wdays_avg_duration.reset_index(inplace = True)
summary_wdays_avg_duration['unit']=1  # dummy unit column required by sns.tsplot

sns.set(style="white", palette="muted", color_codes=True)
sns.set_context("poster")
sns.tsplot(data=summary_wdays_avg_duration, time="pickup_weekday", unit = "unit", condition="vendor_id", value="trip_duration")
sns.despine(bottom = False)
# -

# lookup table: mean trip_duration per (vendor_id, pickup_weekday)
groupby_df = train.groupby(['vendor_id','pickup_weekday'])['trip_duration'].mean().reset_index()
groupby_df


def vendor_id_pickup_weekday(vendor_id, pickup_weekday):
    # Look up the mean trip_duration for one (vendor_id, pickup_weekday) pair
    # in groupby_df.
    # NOTE(review): chained boolean indexing with two separate masks relies on
    # index alignment and triggers a pandas warning; a single combined mask
    # would be safer — confirm before refactoring.
    return groupby_df[groupby_df['vendor_id'] == vendor_id][groupby_df['pickup_weekday'] == pickup_weekday]['trip_duration'].values[0]


# NOTE(review): .loc[:10, :] applies the lookup only to rows labelled up to
# index 10 (inclusive) — presumably a quick smoke test; the rest of the new
# column is left as NaN. Verify this is intentional before production use.
train['groupby_vi_pweekdat_trip_duration'] = train.loc[:10, :].apply(lambda row: vendor_id_pickup_weekday(row['vendor_id'], row['pickup_weekday']), axis=1)
test['groupby_vi_pweekdat_trip_duration'] = test.loc[:10, :].apply(lambda row: vendor_id_pickup_weekday(row['vendor_id'],
                                                                                                       row['pickup_weekday']), axis=1)

# +
# same plot as above, but grouped by pickup hour instead of weekday
summary_wdays_avg_duration = pd.DataFrame(train.groupby(['vendor_id','pickup_hour'])['trip_duration'].mean())
summary_wdays_avg_duration.reset_index(inplace = True)
summary_wdays_avg_duration['unit']=1

sns.set(style="white", palette="muted", color_codes=True)
sns.set_context("poster")
sns.tsplot(data=summary_wdays_avg_duration, time="pickup_hour", unit = "unit", condition="vendor_id", value="trip_duration")
sns.despine(bottom = False)
# -
features/other_feature_engineering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python # coding: utf-8 #matplotlib inline from __future__ import division import numpy as np from numpy.random import rand from numpy import linalg as LA import matplotlib import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.lines import Line2D import matplotlib.lines as mlines import math import sys import os from random import shuffle from scipy.interpolate import UnivariateSpline from scipy.interpolate import InterpolatedUnivariateSpline from scipy.optimize import fmin from scipy.optimize import fsolve from scipy import interpolate from scipy.optimize import curve_fit import scipy.optimize as opt import matplotlib.colors as colors import matplotlib.cm as cmx from scipy.signal import savgol_filter from random import gauss import matplotlib.ticker as ticker from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator) from matplotlib import rc rc('font',**{'family':'sans-serif', 'size' : 10}) #, 'sans-serif':['Arial']}) ## for Palatino and other serif fonts use: #rc('font',**{'family':'serif','serif':['Palatino']}) rc('text', usetex=True) color_red = (0.73, 0.13869999999999993, 0.) color_orange = (1., 0.6699999999999999, 0.) 
color_green = (0.14959999999999996, 0.43999999999999995, 0.12759999999999994)
color_blue = (0.06673600000000002, 0.164512, 0.776)
color_purple = (0.25091600000000003, 0.137378, 0.29800000000000004)
color_ocker = (0.6631400000000001, 0.71, 0.1491)
color_pink = (0.71, 0.1491, 0.44730000000000003)
color_brown = (0.651, 0.33331200000000005, 0.054683999999999955)
color_all = [color_red, color_orange, color_green, color_blue, color_purple, color_ocker,color_pink, color_brown]

#fit_result_scaling
# Fitted scaling-collapse parameter sets (printed as "Tc, nu, zeta, omega, a"
# below) for specific heat, magnetization and susceptibility respectively.
val_c = [ 1.20, 2.5, 0.33, 0.15, 0.23, 0.0, 0.0];
val_m = [1.20, 1.25, -0.18, 0.13, 0.15, 0.19, 0.15];
val_chi = [1.2034, 0.943, 1.44, 0.11, 0.93, 0.05, 0.1];

# project-local finite-size-scaling helpers
from funcfssa import autoscale, scaledata #usual scaling
from funcfssa2 import autoscale2, scaledata2 #correction in y
from funcfssa3 import autoscale3, scaledata3 #added correction in x

# model parameters (recorded for reference)
j2 = 1.0
j6 = 1.0
Kc = 0.0
lambda3 = 2.1
q_Q = 1

# lattice sizes present in the data file
N_og = [40, 60, 80, 100, 120, 140, 160, 180, 200, 240, 260, 340, 380]
N_list = N_og

####
#code
###

# indices selecting the observable inside each per-size data record
cv_ind = 1
chi_ind = 10 #for chi_sigma
m_ind = 9 #for m_sigma

data = np.load('Delta1_data_v2.npy',allow_pickle=True)

######
#-----------------------------------------------------------------------------------------------------------------------
#######
#parameters of the code
######
#-----------------------------------------------------------------------------------------------------------------------
######

#size_investigate = [20, 32]
Blocks = ['*','o', 'v', 'd','s','+', 'x']
#colors_size = ['blue', 'red', 'green', 'brown']
cNorm = colors.Normalize(vmin=0, vmax=1)
#scalarMap = cmx.ScalarMappable(norm=cNorm, cmap='brg_r')
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap='viridis_r')
# one color per lattice size, sampled from viridis
colors_size = [scalarMap.to_rgba(i/(len(N_list)+ 2-1)) for i in range(len(N_list))]

### scaling of specific heat ###
ls = N_list
a_arr = []
da_arr = []
flat_temp = []
ind = cv_ind #here choose your fit
scaleC = 1  # observable is multiplied by L^(2*scaleC)

rhos = np.array(data[-1][0])  # common temperature grid (largest size)
for i in range(len(N_list)):
    i_og = i
    # rescale the observable and spline-interpolate it onto the common grid
    prelim_y = (N_list[i]**(2*scaleC))*data[i_og][2*ind + 1]
    prelim_x = data[i_og][0]
    spl_alt = InterpolatedUnivariateSpline(prelim_x, prelim_y, k=5)
    a_arr.append(spl_alt(rhos))
    da_arr.append(0.01*spl_alt(rhos))  # 1% error bars assumed

a_arr = np.array(a_arr)
da_arr = np.array(da_arr)

print('\nSpecific heat:')
print('(Tc, nu, beta, beta/nu) = (1.2035, 0.83, 0.33, 0.4)')
print('values: Tc, nu, zeta, omega, a')
print(val_c)

# collapse the curves with corrections to scaling on both axes
noisy_auto_scaled_data =scaledata3(ls, rhos, a_arr, da_arr, *val_c)

fig = plt.figure(figsize = (3.375,6.26) )
ax1 = plt.subplot(3, 1, 1)

#specific heat
for i in range(len(N_list)):
    #hand test
    #noisy_auto_scaled_data =scaledata2(ls, rhos, a_arr, da_arr, *testV)
    y_val = (noisy_auto_scaled_data.y.T)[:,i]
    x_val = (noisy_auto_scaled_data.x.T)[:,i]
    #new_dat = scaledata2(ls, rhos, a_arr, da_arr, *[1.203, 5/6, 1/3, 0.05, 0.05])
    #y_val = (new_dat.y.T)[:,i]
    #x_val = (new_dat.x.T)[:,i]
    ax1.plot(x_val, y_val, marker ='o', markersize = 2, linestyle = '-', linewidth = 0.5, color=colors_size[i])

#ax1.set_xlabel(r"$(T - T_{3})L^{1/\nu}/(1 + a_2 L^{-\omega_2})$", fontsize=10);
ax1.set_ylabel(r'$c_{v}L^{\alpha/\nu}/(1 + a_1 L^{-\omega_1})$ ', fontsize=10);
# ax1.set_xlabel(r"$\tilde{t} = (T - T_{potts})L^{1/\nu} (1 + a_2 L^{-\omega_2})^{-1}$", fontsize=10);
# ax1.set_ylabel(r'Scaled Specific Heat $\tilde{c}_{v} = L^{\beta/\nu} c_{v} (1 + a_1 L^{-\omega_1})^{-1}$ ', fontsize=10);

#do the legend and append at the end
# patches_lg = []
# for i in range(len(N_list)):
#     patches_lg.append(patches.Patch(color=colors_size[i], label='L='+str(int(N_list[i]))))
#ax.legend(handles=patches_lg, loc='upper right', bbox_to_anchor=(0.98,0.95), ncol=1,fontsize = 10)
patches_lg = []
for i in range(4):  # first four sizes only; ax2/ax3 legends cover the rest
    patches_lg.append(Line2D([0], [0], color=colors_size[i], linewidth = 1, linestyle = '-', label='$'+str(int(N_list[i]))+'$') )
ax1.legend(handles=patches_lg, loc='upper left', title = '$L = $', title_fontsize = 9, ncol=1, fontsize = 9)

ax1.xaxis.set_minor_locator(MultipleLocator(5))
ax1.grid(which='major', axis='both', linestyle='-', alpha = 0.4)
ax1.grid(which='minor', axis='both', linestyle='-', alpha = 0.2)
ax1.tick_params(axis='both', which='major', labelsize=10)
ax1.tick_params(axis='both', which='minor', labelsize=10)

# fig.tight_layout()
# # plt.savefig('./scaledCV.png', format='png', dpi = 100, bbox_inches='tight')
# plt.show()

### scaling of magnetization ###
ls = N_list
a_arr = []
da_arr = []
flat_temp = []
ind = m_ind #here choose your fit
scaleC = 0  # magnetization is not rescaled by L^2

rhos = np.array(data[-1][0])
for i in range(len(N_list)):
    i_og = i
    prelim_y = (N_list[i]**(2*scaleC))*data[i_og][2*ind + 1]
    prelim_x = data[i_og][0]
    spl_alt = InterpolatedUnivariateSpline(prelim_x, prelim_y, k=5)
    a_arr.append(spl_alt(rhos))
    da_arr.append(0.01*spl_alt(rhos))

a_arr = np.array(a_arr)
da_arr = np.array(da_arr)

print('\nMagnetization:')
print('(Tc, nu, beta, beta/nu) = (1.2035, 0.83, 0.11, 0.1333)')
print('values: Tc, nu, zeta, omega, a')
print(val_m)

noisy_auto_scaled_data =scaledata3(ls, rhos, a_arr, da_arr, *val_m)

ax2 = plt.subplot(3, 1, 2)

#specific heat
for i in range(len(N_list)):
    y_val = (noisy_auto_scaled_data.y.T)[:,i]
    x_val = (noisy_auto_scaled_data.x.T)[:,i]
    # NOTE(review): the same curve is plotted three times below — looks like a
    # copy-paste remnant; harmless but redundant.
    ax2.plot(x_val, y_val, marker ='o', markersize = 2, linestyle = '-', linewidth = 0.5, color=colors_size[i])
    ax2.plot(x_val, y_val, marker ='o', markersize = 2, linestyle = '-', linewidth = 0.5, color=colors_size[i])
    ax2.plot(x_val, y_val, marker ='o', markersize = 2, linestyle = '-', linewidth = 0.5, color=colors_size[i])

#ax2.set_xlabel(r"$(T - T_{3})L^{1/\nu}/(1 + a_2 L^{-\omega_2})$", fontsize=10);
ax2.set_ylabel(r'$m_{\sigma}L^{-\beta/\nu} /(1 + a_1 L^{-\omega_1})$ ', fontsize=10);

#do the legend and append at the end
patches_lg = []
for i in range(4, 9):  # middle block of sizes
    patches_lg.append(Line2D([0], [0], color=colors_size[i], linewidth = 1, linestyle = '-', label='$'+str(int(N_list[i]))+'$') )
ax2.legend(handles=patches_lg, loc='lower left', title_fontsize = 9, ncol=1,fontsize = 9)
#ax2.legend(handles=patches_lg, loc='best', ncol=1,fontsize = 9)
#ax2.legend(handles=patches_lg, loc='best', ncol=1,fontsize = 9) # bbox_to_anchor=(0.98,0.95 ax2.xaxis.set_minor_locator(MultipleLocator(5)) ax2.grid(which='major', axis='both', linestyle='-', alpha = 0.4) ax2.grid(which='minor', axis='both', linestyle='-', alpha = 0.2) ax2.tick_params(axis='both', which='both', labelsize=10) # fig.tight_layout() # # plt.savefig('./scaledMag.png', format='png', dpi = 100, bbox_inches='tight') # plt.show() ### scaling of susceptibility ### ls = N_list a_arr = [] da_arr = [] flat_temp = [] ind = chi_ind #here choose your fit scaleC = 1 # actual range print(f'Range of temperature {data[2][0]}.') tstart = 1.20 prelim_x_p = data[-1][0] uind = np.argmin(np.absolute(prelim_x_p - tstart)) rhos = np.array(data[-1][0])[uind:] for i in range(len(N_list)): i_og = i prelim_x_p = data[i_og][0] uind = np.argmin(np.absolute(prelim_x_p - tstart)) prelim_y = (N_list[i]**(2*scaleC))*data[i_og][2*ind + 1][uind:] prelim_x = prelim_x_p[uind:] spl_alt = InterpolatedUnivariateSpline(prelim_x, prelim_y, k=5) a_arr.append(spl_alt(rhos)) da_arr.append(0.01*spl_alt(rhos)) a_arr = np.array(a_arr) da_arr = np.array(da_arr) # scaling function ansatz autoscale (no corrections to scaling) or autoscale2 (corrections to scaling on x or y) or autoscale3 (corrections to scaling on both x and y) print('\nSusceptibility:') print('(Tc, nu, gamma, gamma/nu) = (1.2035, 0.83, 1.44, 1.733)') print('Susceptibility:') print('values: Tc, nu, zeta, omega, a') print(val_chi) noisy_auto_scaled_data =scaledata3(ls, rhos, a_arr, da_arr, *val_chi) #fig, ax = plt.subplots31,1igsize=(15,10)) ax3 = plt.subplot(3, 1, 3) #fig.set_size_inches(12,6) #fig.set_dpi(100) #susceptibility for i in range(len(N_list)): #y_val = (N_list[i]**(2*scale))*data_thermo[i][:,ind] #x_val = range_x[0] y_val = (noisy_auto_scaled_data.y.T)[:,i] x_val = (noisy_auto_scaled_data.x.T)[:,i] ax3.plot(x_val, y_val, marker ='o', markersize = 2, linestyle = '-', linewidth = 0.5, color=colors_size[i]) 
ax3.set_xlabel(r"$(T - T_{3})L^{1/\nu}/(1 + a_2 L^{-\omega_2})$", fontsize=10); ax3.set_ylabel(r'$\chi_{\sigma}L^{\gamma/\nu} /(1 + a_1 L^{-\omega_1})$ ', fontsize=10); #do the legend and append at the end patches_lg = [] for i in range(9, 13): patches_lg.append(Line2D([0], [0], color=colors_size[i], linewidth = 1, linestyle = '-', label='$'+str(int(N_list[i]))+'$') ) ax3.legend(handles=patches_lg, loc='lower left', title_fontsize = 9, ncol=1,fontsize = 9) #do the legend and append at the end # patches_lg = [] # for i in range(len(N_list)): # #patches_lg.append(patches.Patch(color=colors_size[i], label='L='+str(int(N_list[i])))) # patches_lg.append(Line2D([0], [0], color=colors_size[i], linewidth = 1, linestyle = '-', label='$'+str(int(N_list[i]))+'$') ) #ax3.legend(handles=patches_lg, loc='upper right', bbox_to_anchor=(0.98,0.95), ncol=1,fontsize = 10) ax3.xaxis.set_minor_locator(MultipleLocator(5)) ax3.grid(which='major', axis='both', linestyle='-', alpha = 0.4) ax3.grid(which='minor', axis='both', linestyle='-', alpha = 0.2) ax3.tick_params(axis='both', which='both', labelsize=10) fig.tight_layout() plt.savefig('./scaledSusc.png', format='png', dpi = 600, bbox_inches='tight') # plt.show()
other/test_scaling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Predict weight from height using the given data.

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Simple linear regression (degree-1 polynomial)
# -

# ### Preparing the data

# + pycharm={"name": "#%%\n"}
import pandas as pd

filepath = './dataset/data.csv'
df = pd.read_csv(filepath)

# + pycharm={"name": "#%%\n"}
import numpy as np

# Convert the columns directly to NumPy arrays. (The original appended each
# element to a Python list in a loop and then called np.array — redundant.)
height_array = df['Height'].to_numpy()
weight_array = df['Weight'].to_numpy()

# + pycharm={"name": "#%%\n"}
from sklearn.model_selection import train_test_split

# split into train and test sets (fixed seed for reproducibility)
train_input, test_input, train_target, test_target = train_test_split(
    height_array, weight_array, random_state=15)

# + pycharm={"name": "#%%\n"}
# scikit-learn expects a 2-D feature matrix of shape (n_samples, n_features)
train_input = train_input.reshape(-1, 1)
test_input = test_input.reshape(-1, 1)

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Visualizing the data

# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt

plt.scatter(train_input, train_target)
plt.xlabel('Height')
plt.ylabel('Weight')
plt.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Training the simple linear regression model

# + pycharm={"name": "#%%\n"}
from sklearn.linear_model import LinearRegression

lr = LinearRegression()
lr.fit(train_input, train_target)
print(lr.score(test_input, test_target))

# + [markdown] pycharm={"name": "#%% md\n"}
# The model shows a high test score. Now draw the fitted line over the
# training scatter plot.

# + pycharm={"name": "#%%\n"}
plt.scatter(train_input, train_target)
# line through the two endpoints of the observed height range
plt.plot([1.47, 1.83], [1.47*lr.coef_ + lr.intercept_, 1.83*lr.coef_+lr.intercept_], 'r')

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Polynomial regression
# The straight line predicts negative weights for heights below about 0.6 m,
# because its y-intercept is negative. So we fit a degree-2 polynomial that
# better matches the shape of the data.

# + pycharm={"name": "#%%\n"}
# heights of 0.6 and below are predicted as negative weights
print(lr.predict([[0.6]]))

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Preparing the data

# + pycharm={"name": "#%%\n"}
# feature matrix with columns (height^2, height)
train_poly = np.column_stack((train_input ** 2, train_input))
test_poly = np.column_stack((test_input ** 2, test_input))

# + pycharm={"name": "#%%\n"}
print(train_poly.shape, test_poly.shape)

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Training the model

# + pycharm={"name": "#%%\n"}
lr = LinearRegression()
lr.fit(train_poly, train_target)
print(lr.score(train_poly, train_target))
lr.score(test_poly, test_target)

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Visualizing the fitted curve

# + pycharm={"name": "#%%\n"}
print(lr.coef_, lr.intercept_)

# + pycharm={"name": "#%%\n"}
plt.scatter(train_input, train_target)
# Use the fitted coefficients instead of the hard-coded magic numbers the
# original copied from one particular training run; sort the heights so the
# curve is drawn left to right.
xs = np.sort(height_array)
plt.plot(xs, lr.coef_[0] * xs ** 2 + lr.coef_[1] * xs + lr.intercept_, 'r')
plt.show()
Heights_and_weights_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     argv:
#     - C:/Users/<NAME>/Anaconda3\python.exe
#     - -m
#     - ipykernel_launcher
#     - -f
#     - '{connection_file}'
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nteract={"transient": {"deleting": false}}
# # Long short-term memory (LSTM) is an artificial recurrent neural network (RNN)

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout

import warnings
warnings.filterwarnings("ignore")

# yahoo_finance is used to fetch data
import yfinance as yf
yf.pdr_override()

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# input: ticker symbol and date range to download
symbol = 'AMD'
start = '2014-01-01'
end = '2022-01-14'

# Read data
df = yf.download(symbol,start,end)

# View Columns
df.head()

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
plt.plot(df['Adj Close'])
plt.title('Stock Closing Price')
plt.xlabel('Date')
plt.ylabel('Price')

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
df = df.reset_index()

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# keep only the adjusted close as an (n, 1) array
df = df['Adj Close'].values
df = df.reshape(-1, 1)

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# 80/20 chronological train/test split
dataset_train = np.array(df[:int(df.shape[0]*0.8)])
dataset_test = np.array(df[int(df.shape[0]*0.8):])

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# scale prices to [0, 1]; the scaler is fit on the training split only
scaler = MinMaxScaler(feature_range=(0,1))
dataset_train = scaler.fit_transform(dataset_train)
dataset_test = scaler.transform(dataset_test)

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
def create_dataset(df):
    # Build sliding windows over the series: each sample x is the previous
    # 50 values and the target y is the value that follows the window.
    x = []
    y = []
    for i in range(50, df.shape[0]):
        x.append(df[i-50:i, 0])
        y.append(df[i, 0])
    x = np.array(x)
    y = np.array(y)
    return x,y

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
x_train, y_train = create_dataset(dataset_train)
x_test, y_test = create_dataset(dataset_test)

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# reshape to (samples, timesteps, features) as expected by LSTM layers
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# stacked LSTM (4 layers of 96 units) with dropout and a 1-unit regression head
model = Sequential()
model.add(LSTM(units=96, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=96,return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=96,return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=96))
model.add(Dropout(0.2))
model.add(Dense(units=1))

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# NOTE(review): this cell repeats the reshape above with identical arguments —
# it is a no-op duplicate.
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
model.compile(loss='mean_squared_error', optimizer='adam')

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
model.fit(x_train, y_train, epochs=50, batch_size=32)

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
predictions = model.predict(x_test)
# map predictions and targets back from [0, 1] to the original price scale
predictions = scaler.inverse_transform(predictions)
y_test_scaled = scaler.inverse_transform(y_test.reshape(-1, 1))

# plot predicted vs. actual prices over the held-out test range
fig, ax = plt.subplots(figsize=(16,8))
ax.set_facecolor('black')
ax.plot(y_test_scaled, color='red', label='Original price')
plt.plot(predictions, color='cyan', label='Predicted price')
plt.title('Stock price vs Predict price')
plt.legend()
Stock_Algorithms/LSTM_RNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Download daily DOGE-USD candles through the NeoFinRL Yahoo Finance
# processor, clean them, attach technical indicators and dump the result
# to a CSV file.

# +
import sys

sys.path.append('../')  # make the neo_finrl package importable
# -

import pandas as pd
import numpy as np

# download window and ticker universe
start_date = '2021-01-01'
end_date = '2021-09-18'
tic_list = ['DOGE-USD']

# +
# indicator set attached to the price frame
tech_indicators = ['cci_30', 'rsi_30', 'rsi_14', 'rsi_6', 'dx_30', 'dx_14']
#tech_indicators = [
#    'open_2_sma',
#    'rsi_6',
#    'close_2_tema']
# -

from neo_finrl.data_processors.processor_yahoofinance import YahooFinanceProcessor

processor = YahooFinanceProcessor()

# ### Extract historical px
raw_px = processor.download_data(start_date, end_date, tic_list, '1D')
processor.time_interval = '1D'

cleaned_px = processor.clean_data(raw_px)
enriched_px = processor.add_technical_indicator(cleaned_px, tech_indicators)

enriched_px.to_csv('./DOGE_px.csv', index = False)
notebooks/crypto_data_downloader.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: sample-assessment # language: R # name: r3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/ricds/DL_RS_GEE/blob/main/DL_UNet_CropExample.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="NXnrTReCmxQA" # ### **Deep Learning with remote sensing data for image segmentation: example of rice crop mapping using Sentinel-2 images** # # <NAME>, <NAME> # # <br> # # **Goal of this notebook:**<br> # Train a deep learning model (U-Net) for image segmentation (rice crop) based on Sentinel-2 satellite images. Pre-processing and post-processing are done locally, thus in the Colab we only do the Deep Learning parts that require GPU. # # <br>**Important note:** Make sure to set your Colab notebook to *R* and *GPU* at the Runtime/type menu. # # <br>For this example we will use the basis of the U-Net model (Figure 1, Ronneberger et al., 2015), which have been used with success for many Earth Observation problems (Wagner et al., 2020a, 2020b). There are lots of other models out there you can test, such as ResNet, DeepLab, etc.<br><br> # # <img src='https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/u-net-architecture.png' width = 800> # # Figure 1 - U-Net architecture. # # <br><br>**Contact** <br> # <NAME><br> # National Institute for Space Research (INPE)<br> # Earth Observation and Geoinformatics Division (DIOTG)<br> # São José dos Campos-SP, Brazil<br> # e-mails: <EMAIL> ; <EMAIL><br> # phone: +55 12 98208-5089<br> # https://ricds.wordpress.com/<br> # # + [markdown] id="Zv9_W6xPtBla" # ## Config # # First we install/load some packages necessary to run the deep learning. # # In Colab we don't need to install GPU or anything like that. 
It already comes installed for us. For applications in local computer, you need to install the GPU/CUDA, python environment, etc. all by yourself. Take a look at this link for a guide on get DL running on your computer with GPU: https://doi.org/10.5281/zenodo.3929709 # + colab={"base_uri": "https://localhost:8080/"} id="IdxR6WIQY-TI" outputId="adbe5c22-b0f8-49df-a043-91bcce47d1bf" #@title **Install packages (~4 min)** install.packages("pacman") library(pacman) # # for rgee # system('sudo add-apt-repository ppa:ubuntugis/ubuntugis-unstable') # system('sudo apt-get update') # system('sudo apt-get install libudunits2-dev libgdal-dev libgeos-dev libproj-dev') # system('sudo apt-get install libprotobuf-dev protobuf-compiler libv8-dev libjq-dev') # p_load(sf, mapview, cptcity, geojsonio) # #install.packages('sf') # #install.packages('mapview') # #install.packages('cptcity') # #install.packages('geojsonio') # #remotes::install_github("r-spatial/rgee@rgeev.1.0.3") # p_load(leaflet, rgdal, raster, sp, rgeos, gdalUtils, parallel, doParallel, foreach, fasterize, spatstat, maptools) # install packages for deep learning - ~4 min to load p_load(raster, sp) p_load(devtools, tensorflow, reticulate, keras, tfdatasets, tidyverse, rsample, rgdal) #new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] #if(length(new.packages)) install.packages(new.packages) #lapply(list.of.packages, require, character.only = TRUE) devtools::install_github("rstudio/keras") p_load(keras) # + id="UoiIQBuskZM1" #@title Download datasets # download dataset download.file('https://zenodo.org/record/5498424/files/input_training.zip?download=1', '/content/input_training.zip') download.file('https://zenodo.org/record/5498424/files/input_prediction.zip?download=1', '/content/input_prediction.zip') download.file('https://zenodo.org/record/5498480/files/grid.zip?download=1', '/content/grid.zip') 
download.file('https://zenodo.org/record/5504554/files/ARROZ-RS_Safra_2019_2020.zip?download=1', '/content/ARROZ-RS_Safra_2019_2020.zip')

# unzip
unzip(zipfile='/content/input_training.zip', exdir="5_sampling1_singledate/input")
unzip(zipfile='/content/input_prediction.zip', exdir="6_sampling1_prediction_singledate/pred_input")
unzip(zipfile='/content/grid.zip', exdir="4_grid")
unzip(zipfile="/content/ARROZ-RS_Safra_2019_2020.zip", exdir=".")

# download unet weight (pre-trained checkpoint used later in the prediction section)
dir.create("/content/5_sampling1_singledate/weights_r_save/")
download.file('https://zenodo.org/record/5498488/files/unet_tf2_385_0.9311.h5?download=1', '/content/5_sampling1_singledate/weights_r_save/unet_tf2_385_0.9311.h5')

# + id="7fPKmgvwuB67"
#@title Set some variables

# set up deep learning: output name and the two working directories
result_fname = "rice_map_singledate"
training_data_dir = "5_sampling1_singledate"
prediction_data_dir = "6_sampling1_prediction_singledate"

# image patches and their class masks live side by side under the training dir
img_dir = paste0(training_data_dir, "/input/image")
class_dir = paste0(training_data_dir,"/input/class")

# + id="Vls2-eY_tDmm"
#@title Functions

# function to extract data from a raster (x) from a SpatialPolygons object (y) faster than extract()
# Returns a list with one numeric vector of cell values per polygon in y.
fast_extract = function(x,y) {
  value = list()
  for (i in 1:length(y)) {
    # crop first so mask() only touches the polygon's bounding box
    tmp = crop(x, y[i,])
    #value[[i]] = extract(tmp, y[i,])
    value[[i]] = as.numeric(na.omit(mask(tmp, y[i,])[]))
    rm(tmp)
  }
  return(value)
}

# function to extract data from a raster (x) from a SpatialPolygons object (y) faster than extract()
# Parallel version of fast_extract(); same return value.
fast_extract_parallel = function(x,y) {
  # libraries needed
  require(parallel)    # install.packages("parallel")
  require(doParallel)  # install.packages("doParallel")
  require(foreach)     # install.packages("foreach")

  # Begin cluster
  # here you specify the number of processors you want to use, if you dont know you can
  # use detectCores() and ideally use that number minus one
  cl = parallel::makeCluster(detectCores()-1)
  #cl = parallel::makeCluster(3, outfile="D:/r_parallel_log.txt") # if you use this you can see prints in the txt
  registerDoParallel(cl)

  # apply the model in parallel
  # sometimes you need to specify in the package parameter (.packages) the name of package of the functions you are using
  value = foreach(i=1:length(y)) %dopar% { # note the %dopar% here
    require(raster)
    return(as.numeric(na.omit(mask(crop(x, y[i,]), y[i,])[])))
  }

  # FIX: shut the worker processes down; the original never called stopCluster(),
  # leaking a full set of R workers on every invocation.
  parallel::stopCluster(cl)

  return(value)
}

# function to get the sp with the extent of a raster object
rasext_to_sp = function(x) {
  y = as(extent(x), "SpatialPolygons")
  crs(y) = crs(x)
  return(y)
}

# convert raster to vector using gdal_polygonize
# this version accept the python path and poligonizer path separetely
polygonizer_v2 <- function(x, outshape=NULL, pypath=NULL, polipath = NULL,
                           readpoly=TRUE, fillholes=FALSE, aggregate=FALSE,
                           quietish=TRUE) {
  # x: an R Raster layer, or the file path to a raster file recognised by GDAL
  # outshape: the path to the output shapefile (if NULL, a temporary file will
  #   be created)
  # pypath: the path to gdal_polygonize.py or OSGeo4W.bat (if NULL, the function
  #   will attempt to determine the location)
  # polipath: path to the gdal_polygonize script passed to the interpreter
  # readpoly: should the polygon shapefile be read back into R, and returned by
  #   this function? (logical)
  # fillholes: should holes be deleted (i.e., their area added to the containing
  #   polygon)
  # aggregate: should polygons be aggregated by their associated raster value?
  # quietish: should (some) messages be suppressed? (logical)
  if (isTRUE(readpoly) || isTRUE(fillholes)) require(rgdal)
  #cmd <- Sys.which(paste0(pypath, '\\OSGeo4W.bat'))
  cmd = pypath
  if (is.null(pypath) | is.null(polipath)) {
    stop("Could not find gdal_polygonize.py or OSGeo4W on your system.")
  }
  if (!is.null(outshape)) {
    outshape <- sub('\\.shp$', '', outshape)
    # refuse to clobber an existing shapefile (any of its three components)
    f.exists <- file.exists(paste(outshape, c('shp', 'shx', 'dbf'), sep='.'))
    if (any(f.exists))
      stop(sprintf('File already exists: %s',
                   toString(paste(outshape, c('shp', 'shx', 'dbf'),
                                  sep='.')[f.exists])), call.=FALSE)
  } else outshape <- tempfile()
  if (is(x, 'Raster')) {
    require(raster)
    # dump in-memory rasters to a temp GeoTIFF so gdal_polygonize can read them
    writeRaster(x, {f <- tempfile(fileext='.tif')})
    rastpath <- normalizePath(f)
  } else if (is.character(x)) {
    rastpath <- normalizePath(x)
  } else stop('x must be a file path (character string), or a Raster object.')
  # system2(cmd, args=(
  #   sprintf('"%s" "%s" %s -f "ESRI Shapefile" "%s.shp"',
  #           pypath, rastpath, ifelse(quietish, '-q ', ''), outshape)))
  system2(cmd, sprintf('"%s" "%s" %s -f "ESRI Shapefile" "%s.shp"',
                       polipath, rastpath, ifelse(quietish, '-q ', ''), outshape))
  if(isTRUE(aggregate)||isTRUE(readpoly)||isTRUE(fillholes)) {
    shp <- readOGR(dirname(outshape), layer=basename(outshape), verbose=!quietish)
  } else return(NULL)
  if (isTRUE(fillholes)) {
    # keep only the outer ring (ringDir==1) of each polygon, discarding holes
    poly_noholes <- lapply(shp@polygons, function(x) {
      Filter(function(p) p@ringDir==1, x@Polygons)[[1]]
    })
    pp <- SpatialPolygons(mapply(function(x, id) {
      list(Polygons(list(x), ID=id))
    }, poly_noholes, row.names(shp)), proj4string=CRS(proj4string(shp)))
    shp <- SpatialPolygonsDataFrame(pp, shp@data)
    if(isTRUE(aggregate)) shp <- aggregate(shp, names(shp))
    writeOGR(shp, dirname(outshape), basename(outshape), 'ESRI Shapefile', overwrite=TRUE)
  }
  if(isTRUE(aggregate) & !isTRUE(fillholes)) {
    shp <- aggregate(shp, names(shp))
    writeOGR(shp, dirname(outshape), basename(outshape), 'ESRI Shapefile', overwrite=TRUE)
  }
  ifelse(isTRUE(readpoly), return(shp), return(NULL))
}

# function from spatial.tools (does not work in R4.0.2 yet so we copied
# from previous version)
# Pad (positive delta) or crop (negative delta) a raster's margins by whole
# cells; extent_delta = c(left, right, bottom, top) in cell counts.
modify_raster_margins = function (x, extent_delta = c(0, 0, 0, 0), value = NA) {
  x_extents <- extent(x)
  res_x <- res(x)
  x_modified <- x
  if (any(extent_delta < 0)) {
    # negative deltas: shrink the extent and crop
    ul_mod <- extent_delta[c(1, 3)] * res_x
    ul_mod[ul_mod > 0] <- 0
    lr_mod <- extent_delta[c(2, 4)] * res_x
    lr_mod[lr_mod > 0] <- 0
    crop_extent <- c(x_extents@xmin, x_extents@xmax, x_extents@ymin, x_extents@ymax)
    crop_extent[c(1, 3)] <- crop_extent[c(1, 3)] - ul_mod
    crop_extent[c(2, 4)] <- crop_extent[c(2, 4)] + lr_mod
    x_modified <- crop(x_modified, crop_extent)
  }
  if (any(extent_delta > 0)) {
    # positive deltas: grow the extent and fill new cells with `value`
    ul_mod <- extent_delta[c(1, 3)] * res_x
    ul_mod[ul_mod < 0] <- 0
    lr_mod <- extent_delta[c(2, 4)] * res_x
    lr_mod[lr_mod < 0] <- 0
    extend_extent <- c(x_extents@xmin, x_extents@xmax, x_extents@ymin, x_extents@ymax)
    extend_extent[c(1, 3)] <- extend_extent[c(1, 3)] - ul_mod
    extend_extent[c(2, 4)] <- extend_extent[c(2, 4)] + lr_mod
    x_modified <- extend(x_modified, extend_extent, value = value)
  }
  return(x_modified)
}

# split the extent of a sp object
# Tiles the extent of raster x into block_size x block_size squares (anchored at
# the top edge) and returns a list of `extent` objects, optionally dropping
# all-NA / all-zero tiles.
split_extent_gdal = function(x, block_size = 1000, na_rm = T, remove_all_zero = T, gdal_path = NULL) {
  if (is.null(gdal_path)) stop("Missing GDAL path.")
  # x = LIDAR_ANA_2017
  # x = LIDAR_ST1_2016
  x_ext = extent(x)

  # create a temporary raster within the extent with block_size as pixel size
  #n_x = ceiling(abs((x_ext[2] - x_ext[1])) / block_size)
  n_y = abs((x_ext[4] - x_ext[3])) / block_size

  # adjust extent to fit the cells (pad the TOP so cells align)
  x_ext_mod = x_ext
  x_ext_mod[4] = x_ext_mod[4] + ((ceiling(n_y) - n_y) * block_size)

  r = raster(x_ext_mod, crs = crs(x), resolution = block_size)
  r[] = NA
  #plot(extend(extent(r),100), asp=1)
  #plot(r, add=T, col="red")
  fname = paste0(tempfile(), ".tif")
  writeRaster(r, filename = fname, overwrite=T)
  if (inMemory(x)) {
    # gdalwarp needs x on disk; dump in-memory rasters to a temp file first
    fname_x = paste0(tempfile(), "_x.tif")
    writeRaster(x, filename = fname_x)
    x = raster(fname_x)
  }

  # calculate the average of x inside the pixels
  #gdal_path = "C:\\GDAL_64\\"
  # command
  # gdalwarp = paste(paste0(gdal_path,"gdalwarp")
  #                  #,"-r average -wm 9999"
  #                  ,"-r average -wm 2047"
  #                  ,x@file@name
  #                  ,fname)
  # system(gdalwarp)
  # FIX: the source-file expression had been mangled to "<EMAIL>" by a text
  # scrubber; restored to x@file@name (cf. split_extent_gdal_bottom below).
  gdalUtils::gdalwarp(srcfile = x@file@name, dstfile = fname)

  # load
  r2 = raster(fname)
  #plot(r2)

  # convert raster to polygons - only those with values
  r2_pol = rasterToPolygons(r2, dissolve=F, na.rm=na_rm)
  # plot(r2_pol)

  # exclude all zero
  if (remove_all_zero) {
    idx = which(r2_pol@data[]==0)
    if (length(idx) > 0) r2_pol = r2_pol[-idx,]
  }

  # create extents
  ext_list = list()
  for (i in 1:length(r2_pol)) {
    ext_list[[i]] = extent(r2_pol[i,])
  }
  unlink(fname)
  return(ext_list)
}

# split the extent of a sp object
# Same as split_extent_gdal() but anchors the tile grid at the BOTTOM edge and
# shells out to the gdalwarp binary under gdal_path instead of gdalUtils.
split_extent_gdal_bottom = function(x, block_size = 1000, na_rm = T, remove_all_zero = T, gdal_path = NULL) {
  if (is.null(gdal_path)) stop("Missing GDAL path.")
  # x = LIDAR_ANA_2017
  # x = LIDAR_ST1_2016
  x_ext = extent(x)

  # create a temporary raster within the extent with block_size as pixel size
  #n_x = ceiling(abs((x_ext[2] - x_ext[1])) / block_size)
  n_y = abs((x_ext[4] - x_ext[3])) / block_size

  # adjust extent to fit the cells (pad the BOTTOM so cells align)
  x_ext_mod = x_ext
  #x_ext_mod[4] = x_ext_mod[4] + ((ceiling(n_y) - n_y) * block_size)
  x_ext_mod[3] = x_ext_mod[3] - ((ceiling(n_y) - n_y) * block_size)

  r = raster(x_ext_mod, crs = crs(x), resolution = block_size)
  r[] = NA
  #plot(extend(extent(r),100), asp=1)
  #plot(r, add=T, col="red")
  fname = paste0(tempfile(), ".tif")
  writeRaster(r, filename = fname, overwrite=T)
  if (inMemory(x)) {
    fname_x = paste0(tempfile(), "_x.tif")
    writeRaster(x, filename = fname_x)
    x = raster(fname_x)
  }

  # calculate the average of x inside the pixels
  #gdal_path = "C:\\GDAL_64\\"
  # command
  gdalwarp = paste(paste0(gdal_path,"gdalwarp")
                   #,"-r average -wm 9999"
                   ,"-r average -wm 2047"
                   ,x@file@name
                   ,fname)
  system(gdalwarp)

  # load
  r2 = raster(fname)
  #plot(r2)

  # convert raster to polygons - only those with values
  r2_pol = rasterToPolygons(r2, dissolve=F, na.rm=na_rm)
  # plot(r2_pol)

  # exclude all zero
  if (remove_all_zero) {
    idx = which(r2_pol@data[]==0)
    if (length(idx) > 0) r2_pol = r2_pol[-idx,]
  }

  # create extents
  ext_list = list()
  for (i in 1:length(r2_pol)) {
    ext_list[[i]] = extent(r2_pol[i,])
  }
  unlink(fname)
  return(ext_list)
}

# function to remove the last 4 digits of a string (usually the extension e.g. ".tif")
# and substitute it for another string
sub_extension = function (x, y) {
  return(paste0(substr(x, 1, nchar(x)-4), y))
}

# + [markdown] id="7nbz53X2vLLS"
# # 1) Field samples
#
# In this section we download and load a dataset of rice crop locations from the CONAB Brazilian government organization for the Brazilian state of Rio Grande do Sul. This dataset and more can be found here https://portaldeinformacoes.conab.gov.br/mapeamentos-agricolas.html
#
# We filter the data for only one municipality (Uruguaiana) to run the experiments.

# + colab={"base_uri": "https://localhost:8080/", "height": 658} id="m2On0-8ZvpxS" outputId="ba5ffb96-0e37-41c9-99b3-6a76ab00a531"
#if (FALSE) {
# load field data
field_data = readOGR("RS_ARROZ_IRRIG_INUND_1920.shp")

# lets filter these data to only one municipality, list municipalities and get only one
#unique(field_data$NM_MUNICIP)

# use uruguaiana as an example
field_data = field_data[field_data$NM_MUNICIP == "URUGUAIANA",]
print(field_data)

# reproject to the same projection of the satellite data
field_data = spTransform(field_data, crs("+proj=utm +zone=21 +south +datum=WGS84 +units=m +no_defs"))

# visualize
plot(field_data)
#}

# # load data
# field_data_sf = st_read("RS_ARROZ_IRRIG_INUND_1920.shp")
# field_data_sf = st_transform(field_data_sf, crs("+proj=utm +zone=21 +south +datum=WGS84 +units=m +no_defs"))
# # use uruguaiana as an example
# field_data_sf = field_data_sf[field_data_sf$NM_MUNICIP == "URUGUAIANA",]
# print(field_data_sf)
# plot(st_geometry(field_data_sf))

# # create convex hull around the polygons
# p_load(rgeos)
# field_data_convex = gConvexHull(field_data)
# field_data_convex$id = 1
# plot(field_data_convex, main="convex hull")
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DVHzlXplSzlX" outputId="8a6bc3ed-58b8-4854-e04b-9488666a950d"
#@title Visualize some patch samples

# list samples
list_img = list.files(img_dir, pattern = "*.png", full.names = TRUE)
list_mask = list.files(class_dir, pattern = "*.png", full.names = TRUE)
length(list_img)

# plot a random image patch and its class mask
# i=1
i = sample(1:length(list_img), 1)
r = stack(list_img[i])
plot(r)
r
print(i)
plot(raster(list_mask[i]))

# + [markdown] id="Ine1vjRy9Ec_"
# # 2) Deep Learning Training

# + colab={"base_uri": "https://localhost:8080/", "height": 505} id="6HZxIPYwCO4a" outputId="83a09424-eeb3-4e00-a2a9-3ec7b46bff3d"
#@title DL stratify samples

# libraries we're going to need later
# always put reticulate and use_python as the first packages to load, or you will not be able to choose the conda env/python
#library("reticulate")
p_load(reticulate, keras, tfdatasets, tidyverse, rsample)
#use_python("C:\\ProgramData\\Miniconda3\\envs\\r-tensorflow", required = T)
#use_python("C:\\Users\\Ricardo\\Miniconda3\\envs\\r-tensorflow", required = T)
#library(keras)
#library(tfdatasets)
#library(tidyverse)
#library(rsample)
#py_config()

# load all data: paired image/mask file lists (same alphabetical order)
data_full <- tibble(
  img = list.files(img_dir, pattern = "*.png", full.names = TRUE),
  mask = list.files(class_dir, pattern = "*.png", full.names = TRUE)
)

# random sorting of the data (fixed seed so the split is reproducible)
set.seed(10)
random_order = sample(1:dim(data_full)[1], dim(data_full)[1])
data_full_reorder <- data_full[random_order,]

# split the data between training and validation (80/20)
data_full_reorder <- initial_split(data_full_reorder, prop = 0.8)
train_samples = length(data_full_reorder$in_id)
train_fname = training(data_full_reorder)$img
test_fname = testing(data_full_reorder)$img

# (OPTIONAL, only if needed) some additional filtering
# in my case i need this because i have samples from four different images with different names (img_1, img_2, img_3, img_4), so i need to pick one
#img_list = grep(paste0("img_",j), img_list, value=T)
#img_list = grep(paste0(transect_name,"_tchm_",j), img_list, value=T)
#img_list = grep(paste0(transect_name,"_tchm"), img_list, value=T)

# find the id on the name of imgs string (the number after the last underscore,
# stripping the 4-char ".png" extension)
# FIX: the original used nchar(...)[1] - 4, i.e. the FIRST filename's length for
# every file, which corrupts ids when filenames differ in length; vectorized here.
idx_last_underline = regexpr("\\_[^\\_]*$", basename(test_fname)[1])[1]
ids_validation = as.numeric(substr(basename(test_fname), idx_last_underline + 1, nchar(basename(test_fname)) - 4))

# find the id on the name of imgs string
idx_last_underline = regexpr("\\_[^\\_]*$", basename(train_fname)[1])[1]
ids_train = as.numeric(substr(basename(train_fname), idx_last_underline + 1, nchar(basename(train_fname)) - 4))

# get the polygons inside each block id
grid = readOGR("4_grid/grid.shp")
samples_patches_validation = grid[grid$id %in% ids_validation,]
samples_patches_train = grid[grid$id %in% ids_train,]
plot(samples_patches_train, main="train = black, validation = red")
lines(samples_patches_validation, col="red")

# save
save(samples_patches_train, samples_patches_validation, file = "deep_learning_patch_samples_singledate.RData")

# + colab={"base_uri": "https://localhost:8080/", "height": 102} id="xjYnggqwAb3p" outputId="9cfddfae-d46a-40df-c5f8-4efd5365f933"
#@title DL training U-Net (~7min for 15 iterations)

# parameters
epochs = 15L # 400L
batch_size = 32L
lr_rate = 0.0001
decay_rate = 0.0001
data_n_layers = 4

## the model
# mixed precision (float16 compute, float32 variables) for speed on GPU
tf$keras$mixed_precision$experimental$set_policy('mixed_float16')

# Dice coefficient used both as metric ("custom") and inside the loss
dice_coef <- custom_metric("custom", function(y_true, y_pred, smooth = 1.0) {
  y_true_f <- k_flatten(y_true)
  y_pred_f <- k_flatten(y_pred)
  intersection <- k_sum(y_true_f * y_pred_f)
  result <- (2 * intersection + smooth) /
    (k_sum(y_true_f) + k_sum(y_pred_f) + smooth)
  return(result)
})

# combined binary cross-entropy + (1 - Dice) loss
bce_dice_loss <- function(y_true, y_pred) {
  result <- loss_binary_crossentropy(y_true, y_pred) + (1 - dice_coef(y_true, y_pred))
  return(result)
}

# Build the 128x128 U-Net (Ronneberger et al., 2015 style encoder/decoder)
get_unet_128 <- function(input_shape = c(128, 128, data_n_layers), num_classes = 1) {

  inputs <- layer_input(shape = input_shape)
  # 128
  down1 <- inputs %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  down1_pool <- down1 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2))
  # 64
  down2 <- down1_pool %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  down2_pool <- down2 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2))
  # 32
  down3 <- down2_pool %>%
    layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  down3_pool <- down3 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2))
  # 16
  down4 <- down3_pool %>%
    layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  down4_pool <- down4 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2))
  # 8
  center <- down4_pool %>%
    layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  # center
  up4 <- center %>%
    layer_upsampling_2d(size = c(2, 2)) %>%
    {layer_concatenate(inputs = list(down4, .), axis = 3)} %>%
    layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  # 16
  up3 <- up4 %>%
    layer_upsampling_2d(size = c(2, 2)) %>%
    {layer_concatenate(inputs = list(down3, .), axis = 3)} %>%
    layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  # 32
  up2 <- up3 %>%
    layer_upsampling_2d(size = c(2, 2)) %>%
    {layer_concatenate(inputs = list(down2, .), axis = 3)} %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  # 64
  up1 <- up2 %>%
    layer_upsampling_2d(size = c(2, 2)) %>%
    {layer_concatenate(inputs = list(down1, .), axis = 3)} %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu") %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>%
    layer_batch_normalization() %>% layer_activation("relu")
  # 128
  # output layer forced to float32 so mixed precision does not degrade the sigmoid
  classify <- layer_conv_2d(up1, filters = num_classes,
                            kernel_size = c(1, 1),
                            dtype = 'float32', # mixed precision
                            activation = "sigmoid")

  model <- keras_model(inputs = inputs, outputs = classify)

  model %>% compile(
    optimizer = optimizer_rmsprop(lr = lr_rate, decay = decay_rate),
    loss = bce_dice_loss,
    #loss = bce_dice_loss_flooding,
    metrics = c(dice_coef)
  )

  return(model)
}
#
model <- get_unet_128()

## data augmentation
# random brightness, contrast, hue
random_bsh <- function(img) {
  img <- img %>%
    tf$image$random_brightness(max_delta = 0.2) %>%
    tf$image$random_contrast(lower = 0.9, upper = 1.1) # %>%
  # img <- tf$math$multiply(img,tf$random$uniform(shape = shape(1L), minval = 0.4 ,maxval = 1.6 ,dtype = tf$float32))
  # for RGB
  # img_aug <- img[,,1:3] %>%
  #   tf$image$random_saturation(lower = 0.9, upper = 1.1) %>%
  #   tf$image$random_hue(max_delta = 0.2) #%>%
  #
  # img <- tf$keras$backend$concatenate(
  #   list(img_aug[,,1:3,drop=FALSE],img[,,4,drop=FALSE]), axis=-1L
  # ) %>% tf$clip_by_value(0, 1)
}

# flip helpers conditioned on a shared random draw y so img and mask flip together
random_flip_up_down <- function(x,y) {
  tf$cond(tf$less(y , 0.25), function() tf$image$flip_up_down(x), function() x)
}
random_flip_left_right <- function(x,y) {
  tf$cond(tf$greater(y , 0.75), function() tf$image$flip_left_right(x), function() x)
}

# map data: build a tfdatasets pipeline of (img, mask) tensors
create_dataset <- function(data, train, batch_size = 8L, data_n_layers = 3) {
  dataset <- data %>%
    mutate(rot = ifelse(runif(dim(data)[1])>0.75,1,0)*runif(dim(data)[1],min=0,max=2*pi)) %>%
    tensor_slices_dataset() %>%
    dataset_map(~.x %>% list_modify(
      img = tf$image$decode_png(tf$io$read_file(.x$img),channels=data_n_layers),
      mask = tf$image$decode_png(tf$io$read_file(.x$mask),channels=1)
    )) %>%
    dataset_map(~.x %>% list_modify(
      img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32),
      #mask = tf$image$convert_image_dtype(.x$mask, dtype = tf$uint8)
      mask = tf$image$convert_image_dtype(.x$mask, dtype = tf$float32)
    ))

  # set rot variable to a random uniform value
  dataset <- dataset %>%
    dataset_map(~.x %>% list_modify(
      rot = tf$random$uniform(shape = shape(1L), minval = 0 ,maxval = 1 ,dtype = tf$float32)
    ))

  # apply up/down and left/right flip conditioned by rot
  dataset <- dataset %>%
    dataset_map(~.x %>% list_modify(
      img = random_flip_up_down(x=.x$img,y=.x$rot),
      mask = random_flip_up_down(x=.x$mask,y=.x$rot)
    )) %>%
    dataset_map(~.x %>% list_modify(
      img = random_flip_left_right(x=.x$img,y=.x$rot),
      mask = random_flip_left_right(x=.x$mask,y=.x$rot)
    ))

  # data augmentation performed on training set only
  if (train) {
    dataset <- dataset %>%
      dataset_map(~.x %>% list_modify(
        img = random_bsh(.x$img)
      ))
  }

  # shuffling on training set only
  if (train) {
    # FIX: the pipe previously dangled into commented-out lines, which is a
    # parse error in R; the shuffle call is now active.
    dataset <- dataset %>%
      dataset_shuffle(buffer_size = batch_size*128)
      # dataset_shuffle(buffer_size = batch_size*128,seed=666,reshuffle_each_iteration=FALSE)
  }

  # train in batches; batch size might need to be adapted depending on
  # available memory
  dataset <- dataset %>% dataset_batch(batch_size)

  dataset %>%
    # output needs to be unnamed
    dataset_map(unname)
}

## Train
# Training and test set creation now is just a matter of two function calls.
training_dataset <- create_dataset(training(data_full_reorder), train = TRUE, data_n_layers = data_n_layers)
validation_dataset <- create_dataset(testing(data_full_reorder), train = FALSE, data_n_layers = data_n_layers)

# callbacks: CSV history log + best-checkpoint saving keyed on validation Dice
dir.create(paste0(training_data_dir, "/epoch_history/"), showWarnings=F)
dir.create(paste0(training_data_dir, "/weights/"), showWarnings=F)
dir.create(paste0(training_data_dir, "/weights_r_save/"), showWarnings=F)
callbacks_list <- list(
  callback_csv_logger(paste0(training_data_dir, "/epoch_history/epoch_history.csv"), separator = ";", append = FALSE),
  callback_model_checkpoint(filepath = paste0(training_data_dir, "/weights/unet_tf2_{epoch:03d}_{val_custom:.4f}.h5"),
                            monitor = "val_custom", save_best_only = TRUE,
                            save_weights_only = TRUE, mode = "max", save_freq = NULL)
)

## start training from a set of weights
# load_model_weights_hdf5(model, "./weights_r_save/unet_tf2_111_0.6083_noBSH.h5") # example

# train
training_dataset <- dataset_repeat(training_dataset, count = epochs)
model_history = fit_generator(model, training_dataset, validation_data = validation_dataset,
                              workers = 1,
                              steps_per_epoch = as.integer(train_samples / batch_size),
                              epochs = epochs, callbacks = callbacks_list)
# how to print to console?
# https://stackoverflow.com/questions/37689694/real-time-printing-to-console-with-r-in-jupyter

# clear GPU
# FIX: was tf$keras.backend$clear_session() — reticulate's `$` does not split on
# dots, so that looked up a non-existent attribute literally named "keras.backend";
# the nested access below matches tf$keras$backend$concatenate used elsewhere.
tf$keras$backend$clear_session()
py_gc <- import('gc')
py_gc$collect()

# + colab={"base_uri": "https://localhost:8080/", "height": 471} id="-ewuj6GLEu8z" outputId="3662a2ad-c70c-4823-e1b1-d049e2bf9552"
#@title Visualize model performance during training

# visualize the acc
plot(model_history)

# + [markdown] id="wi9WNoj5GJgA"
# # 3) DL Prediction
#
# We apply the best model to the whole image. This cell crops the image in multiple patches, apply the model, and then mosaic everything back together as a single image.
#
# For prediction, we increase the size of the patches to make prediction faster. The size of this prediction is more or less *the size that fits in the memory*. So we use 640x640 (prediction) instead of 128x128 (training).
# + id="lEt1SRe-BIf4"
#@title Prepare to predict

# location of the model weights file (downloaded in the "Download datasets" cell)
weights_fname = "5_sampling1_singledate/weights_r_save/unet_tf2_385_0.9311.h5" # best weight

# libraries we're going to need later
# always put reticulate and use_python as the first packages to load, or you will not be able to choose the conda env/python
#library("reticulate")
p_load(reticulate, keras, tfdatasets, tidyverse, rsample)
#use_python("C:\\ProgramData\\Miniconda3\\envs\\r-tensorflow", required = T)
#use_python("C:\\Users\\Ricardo\\Miniconda3\\envs\\r-tensorflow", required = T)
#library(keras)
#library(tfdatasets)
#library(tidyverse)
#library(rsample)
#py_config()

## Config

# config — smaller batch than training because prediction patches are 640x640
batch_size = 4
data_n_layers = 4
lr_rate = 0.0001

# raster opts: keep raster's temp files under a local tmp dir
dir.create(paste0(prediction_data_dir, "/tmp"), showWarnings = F)
rasterOptions(tmpdir="tmp")
#rasterOptions(maxmemory = 5e+10)
#rasterOptions(chunksize = 1e+09)

# path to the gdal files and to the osgeo .bat (to run gdal_polygonize)
#gdal_path = "C:\\OSGeo4W\\bin\\"
#osgeo_path = "C:\\OSGeo4W"

# create dirs for the prediction inputs/outputs and post-processing stages
dir.create(paste0(prediction_data_dir, "/pred_input"), showWarnings = F, recursive=T)
dir.create(paste0(prediction_data_dir,"/pred_output"), showWarnings = F)
dir.create(paste0(prediction_data_dir,"/pred_mosaic"), showWarnings = F, recursive=T)
dir.create(paste0(prediction_data_dir,"/pred_vector"), showWarnings = F, recursive=T)

# clear some folders before starting
#unlink(list.files(paste0(prediction_data_dir, "/pred_input/"), full.names = TRUE))
unlink(list.files(paste0(prediction_data_dir, "/pred_output/"), full.names = TRUE))
unlink(list.files(paste0(prediction_data_dir, "/tmp/"), full.names = TRUE))

# load data
#img_list = list.files("2_Images", full.names=T)

# load
#predictor_data = stack(img_list)
#predictor_data_expand = stack(img_list)[[bands_to_use]]
# predictor_data_expand = stack("s2_blank.tif")

# # change NA to 0
# predictor_data[is.na(predictor_data),] = 0

# add borders
#predictor_data_expand =
modify_raster_margins(predictor_data, extent_delta=c(64,64,64,64),value=0) ## not needed i think # # adjust image to a multiple of 512 - for easier cropping # n_row_tiles = ceiling(nrow(predictor_data)/512) # n_row_add = (n_row_tiles * 512) - nrow(predictor_data) # n_col_tiles = ceiling(ncol(predictor_data)/512) # n_col_add = (n_col_tiles * 512) - ncol(predictor_data) # predictor_data_expand = modify_raster_margins(predictor_data, extent_delta=c(0,n_col_add,0,n_row_add),value=0) # # define the extents # block_size = 512*10 # #predictor_data_ext = split_extent_gdal_bottom(x = predictor_data_expand[[1]], block_size = block_size, na_rm = F, remove_all_zero = F, gdal_path = gdal_path) # p_load(gdalUtils) # predictor_data_ext = split_extent_gdal_border_adj(x =predictor_data_expand, block_size = block_size, na_rm = T, remove_all_zero = F, gdal_path = "") # length(predictor_data_ext) # + id="k7DrbFNYKatm" #@title predict all tiles (~7 min) # load data test_dir <- paste0(prediction_data_dir, "/pred_input/") list_png=list.files(test_dir, pattern = "*.png$", full.names = TRUE) length(list_png) data <- tibble( img = list_png ) ## the model # mixed precision tf$keras$mixed_precision$experimental$set_policy('mixed_float16') dice_coef <- custom_metric("custom", function(y_true, y_pred, smooth = 1.0) { y_true_f <- k_flatten(y_true) y_pred_f <- k_flatten(y_pred) intersection <- k_sum(y_true_f * y_pred_f) result <- (2 * intersection + smooth) / (k_sum(y_true_f) + k_sum(y_pred_f) + smooth) return(result) }) bce_dice_loss <- function(y_true, y_pred) { result <- loss_binary_crossentropy(y_true, y_pred) + (1 - dice_coef(y_true, y_pred)) return(result) } get_unet_128 <- function(input_shape = c(640, 640, data_n_layers), num_classes = 1) { inputs <- layer_input(shape = input_shape) # 128 down1 <- inputs %>% layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 64, kernel_size = c(3, 
3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") down1_pool <- down1 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2)) # 64 down2 <- down1_pool %>% layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") down2_pool <- down2 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2)) # 32 down3 <- down2_pool %>% layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") down3_pool <- down3 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2)) # 16 down4 <- down3_pool %>% layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") down4_pool <- down4 %>% layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2)) # 8 center <- down4_pool %>% layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") # center up4 <- center %>% layer_upsampling_2d(size = c(2, 2)) %>% {layer_concatenate(inputs = list(down4, .), axis = 3)} %>% layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") 
%>% layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") # 16 up3 <- up4 %>% layer_upsampling_2d(size = c(2, 2)) %>% {layer_concatenate(inputs = list(down3, .), axis = 3)} %>% layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") # 32 up2 <- up3 %>% layer_upsampling_2d(size = c(2, 2)) %>% {layer_concatenate(inputs = list(down2, .), axis = 3)} %>% layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") # 64 up1 <- up2 %>% layer_upsampling_2d(size = c(2, 2)) %>% {layer_concatenate(inputs = list(down1, .), axis = 3)} %>% layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") %>% layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>% layer_batch_normalization() %>% layer_activation("relu") # 128 classify <- layer_conv_2d(up1, filters = num_classes, kernel_size = c(1, 1), dtype = 'float32', # mixed precision activation = "sigmoid") model <- keras_model( inputs = inputs, outputs = classify ) model %>% compile( optimizer = optimizer_rmsprop(lr = lr_rate), loss = bce_dice_loss, metrics = 
c(dice_coef) ) return(model) } model <- get_unet_128() # map data create_dataset <- function(data, batch_size = 4L, data_n_layers = 3) { dataset <- data %>% tensor_slices_dataset() %>% dataset_map(~.x %>% list_modify( img = tf$image$decode_png(tf$io$read_file(.x$img), channels=data_n_layers), )) %>% dataset_map(~.x %>% list_modify( img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32), )) # train in batches; batch size might need to be adapted depending on # available memory dataset <- dataset %>% dataset_batch(batch_size) dataset %>% # output needs to be unnamed dataset_map(unname) } ## Predict ## load saved weights load_model_weights_hdf5(model, weights_fname) # predict test_dataset <- create_dataset(data, data_n_layers = data_n_layers, batch_size = batch_size) system.time({ preds <- predict(model, test_dataset) }) print("Prediction end.") # clear GPU tf$keras.backend$clear_session() py_gc <- import('gc') py_gc$collect() # dim(preds) # save the results in a file save(preds, file = "preds.RData") # + [markdown] id="KU5EfNaroYEl" # # **4) Quick visualization** # # After prediction, we can take a look at some examples here below. # + colab={"base_uri": "https://localhost:8080/", "height": 874} id="QWyBCgvWRz_Z" outputId="b028a72f-f9b4-41f2-95a8-70db0064612b" #@title Visualize some random examples # list files list_png=list.files(test_dir, pattern = "*.png$", full.names = TRUE) length(list_png) # plot some example #i= 20 i = sample(1:length(list_png), 1) # random example plot(stack(list_png[i])[[1]]) image(t(apply(preds[i,,,], 2, rev))) # + [markdown] id="dHFJf0xAB4A3" # # Final considerations # # After predicting we need to download the *preds.RData* file to the local computer to make the final map. We don't do it here because it requires the original imagery (which are heavy) to get the original geospatial coordinates (because we used PNG), and a few GDAL functions which I did not have enough time to test it here. 
A further way to improve this code would be to use TIFF files instead of PNG, so we do not lose the coordinates and it is easier to produce the outputs.
DL_UNet_CropExample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="2HTTGuFUI487" colab_type="code" colab={} import os import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.preprocessing import Normalizer # + id="Y8fpUOVQ5i_n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="c91373c2-8099-4502-9c84-f54ce2f9c2cf" image = plt.imread(os.path.join("images", "ladybug.png")) plt.imshow(image) image.shape # + id="GCPEBkPtLmXg" colab_type="code" colab={} X = image.reshape(-1, 3) # + id="bo8gWu9FWwoV" colab_type="code" colab={} segmented_imgs = [] n_colors = (10, 8, 6, 4, 2) for n_clusters in n_colors: kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(X) segmented_img = kmeans.cluster_centers_[kmeans.labels_] segmented_imgs.append(segmented_img.reshape(image.shape)) # + id="yyVFDsmCW0Nr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="f9da0119-a8e5-406b-fe56-fc96a70502a7" plt.figure(figsize=(10,5)) plt.subplots_adjust(wspace=0.05, hspace=0.1) plt.subplot(231) plt.imshow(image) plt.title("Original image") plt.axis('off') for idx, n_clusters in enumerate(n_colors): plt.subplot(232 + idx) plt.imshow(segmented_imgs[idx]) plt.title("{} colors".format(n_clusters)) plt.axis('off') plt.show()
color_segmentation_com_kmeans.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Preprocessing Using Dataflow # # **Learning Objectives** # - Creating datasets for Machine Learning using Dataflow # # ## Introduction # # While Pandas is fine for experimenting, for operationalization of your workflow, it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam also allows for streaming. # # ### Set environment variables and load necessary libraries # Execute the following cells to install the necessary libraries if they have not been installed already. #Ensure that we have Apache Beam version installed. # !pip freeze | grep apache-beam || sudo pip install apache-beam[gcp]==2.12.0 import tensorflow as tf import apache_beam as beam import shutil import os print(tf.__version__) # Next, set the environment variables related to your GCP Project. PROJECT = "cloud-training-demos" # Replace with your PROJECT BUCKET = "cloud-training-bucket" # Replace with your BUCKET REGION = "us-central1" # Choose an available region for Cloud MLE TFVERSION = "1.14" # TF version for CMLE to use import os os.environ["BUCKET"] = BUCKET os.environ["PROJECT"] = PROJECT os.environ["REGION"] = REGION # + language="bash" # if ! gsutil ls | grep -q gs://${BUCKET}/; then # gsutil mb -l ${REGION} gs://${BUCKET} # fi # - # ## Save the query from earlier # # The data is natality data (record of births in the US). My goal is to predict the baby's weight given a number of factors about the pregnancy and the baby's mother. Later, we will want to split the data into training and eval datasets. The hash of the year-month will be used for that. 
# Create SQL query using natality data after the year 2000 query_string = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 """ # #### **Excerise 1** # # Use the `query_string` we defined above to call BigQuery and create a local Pandas dataframe. Look at the documentation for calling BigQuery within a Jupyter notebook if you need to remind yourself its usage. # # **Hint**: it might help to add a `LIMIT` to the query string to control the size of the resulting dataframe. # + # Call BigQuery and examine in dataframe from google.cloud import bigquery bq = # TODO: Your code goes here df = # TODO: Your code goes here df.head() # - # ## Create ML dataset using Dataflow # # Let's use Cloud Dataflow to read in the BigQuery data, do some preprocessing, and write it out as CSV files. # # Instead of using Beam/Dataflow, I had three other options: # # * Use Cloud Dataprep to visually author a Dataflow pipeline. Cloud Dataprep also allows me to explore the data, so we could have avoided much of the handcoding of Python/Seaborn calls above as well! # * Read from BigQuery directly using TensorFlow. # * Use the BigQuery console (http://bigquery.cloud.google.com) to run a Query and save the result as a CSV file. For larger datasets, you may have to select the option to "allow large results" and save the result into a CSV file on Google Cloud Storage. # # However, in this case, I want to do some preprocessing, modifying data so that we can simulate what is known if no ultrasound has been performed. If I didn't need preprocessing, I could have used the web console. Also, I prefer to script it out rather than run queries on the user interface, so I am using Cloud Dataflow for the preprocessing. # # The `preprocess` function below includes an arugment `in_test_mode`. 
When this is set to `True`, running `preprocess` initiates a *local* Beam job. This is helpful for quickly debugging your pipeline and ensuring it works before submitting a job to the Cloud. Setting `in_test_mode` to `False` will launch a processing that is happening on the Cloud. Go to the GCP webconsole to [the Dataflow section](https://pantheon.corp.google.com/dataflow) and monitor the running job. It took about 20 minutes for me. # # If you wish to continue without doing this step, you can copy my preprocessed output: # <pre> # gsutil -m cp -r gs://cloud-training-demos/babyweight/preproc gs://YOUR_BUCKET/ # </pre> # #### **Exercise 2** # # The cell block below contains a collection of TODOs that will complete the pipeline for processing the baby weight dataset with Apache Beam and Cloud Dataflow. # # In the first block of TODOs we use the original dataset to create synthetic data where we assume no ultrasound has been performed. Look back to the [`2_sample.ipynb`](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/asl/courses/machine_learning/deepdive/05_review/2_sample.ipynb) notebook to remind yourself how this was done. # Note, these operations are done on the row level as that is how the data will be processed in the pipeline via the map function. # # The next block of TODOs comes at the bottom of the cell, where we actually create the preprocessing pipeline. There are three TODOs for you to complete # - First, read in the data from BigQuery using the `selquery` created before using the `beam.io.Read` functionality # - Next, use `beam.FlatMap` to apply the `to_csv` function you modified in the previous TODOs # - Lastly, write the preprocessed records to the predetermined `OUTPUT_DIR` using `beam.io.Write` functionality. # # Look at the documentation for Beam to remind yourself the correct usage of these operations. 
# + import apache_beam as beam import datetime, os def to_csv(rowdict): # Pull columns from BQ and create a line import hashlib import copy CSV_COLUMNS = "weight_pounds,is_male,mother_age,plurality,gestation_weeks".split(',') # Create synthetic data where we assume that no ultrasound has been performed # and so we don"t know sex of the baby. Let"s assume that we can tell the difference # between single and multiple, but that the errors rates in determining exact number # is difficult in the absence of an ultrasound. no_ultrasound = copy.deepcopy(rowdict) w_ultrasound = copy.deepcopy(rowdict) no_ultrasound["is_male"] = # TODO: Your code goes here if rowdict["plurality"] > 1: no_ultrasound["plurality"] = # TODO: Your code goes here else: no_ultrasound["plurality"] = # TODO: Your code goes here # Change the plurality column to strings w_ultrasound["plurality"] = ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"][rowdict["plurality"] - 1] # Write out two rows for each input row, one with ultrasound and one without for result in [no_ultrasound, w_ultrasound]: data = ','.join([str(result[k]) if k in result else "None" for k in CSV_COLUMNS]) yield str("{}".format(data)) def preprocess(in_test_mode): import shutil, os, subprocess job_name = "preprocess-babyweight-features" + "-" + datetime.datetime.now().strftime("%y%m%d-%H%M%S") if in_test_mode: print("Launching local job ... hang on") OUTPUT_DIR = "./preproc" shutil.rmtree(OUTPUT_DIR, ignore_errors=True) os.makedirs(OUTPUT_DIR) else: print("Launching Dataflow job {} ... 
hang on".format(job_name)) OUTPUT_DIR = "gs://{0}/babyweight/preproc/".format(BUCKET) try: subprocess.check_call("gsutil -m rm -r {}".format(OUTPUT_DIR).split()) except: pass options = { "staging_location": os.path.join(OUTPUT_DIR, "tmp", "staging"), "temp_location": os.path.join(OUTPUT_DIR, "tmp"), "job_name": job_name, "project": PROJECT, "teardown_policy": "TEARDOWN_ALWAYS", "no_save_main_session": True } opts = beam.pipeline.PipelineOptions(flags = [], **options) if in_test_mode: RUNNER = "DirectRunner" else: RUNNER = "DataflowRunner" p = beam.Pipeline(RUNNER, options = opts) query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND weight_pounds > 0 AND mother_age > 0 AND plurality > 0 AND gestation_weeks > 0 AND month > 0 """ if in_test_mode: query = query + " LIMIT 100" for step in ["train", "eval"]: if step == "train": selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) < 80".format(query) elif step == "eval": selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 80 AND ABS(MOD(hashmonth, 100)) < 90".format(query) else: selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 90".format(query) (p | "{}_read".format(step) >> # TODO: Your code goes here | "{}_csv".format(step) >> # TODO: Your code goes here | "{}_out".format(step) >> # TODO: Your code goes here ) job = p.run() if in_test_mode: job.wait_until_finish() print("Done!") preprocess(in_test_mode = True) # - # For a Cloud preprocessing job (i.e. setting `in_test_mode` to `False`), the above step will take 20+ minutes. Go to the GCP web console, navigate to the Dataflow section and <b>wait for the job to finish</b> before you run the follwing step. # # ## View results # We can have a look at the elements in our bucket to see the results of our pipeline above. 
# !gsutil ls gs://$BUCKET/babyweight/preproc/*-00000* # # Preprocessing with BigQuery # Create SQL query for BigQuery that will union all both the ultrasound and no ultrasound datasets. # #### **Exercise 3** # # The cell block below contains a collection of TODOs that will complete the query for processing the baby weight dataset with BigQuery. # # In the block of TODOs we use the original dataset to create synthetic data where we assume no ultrasound has been performed. Look back to the [`2_sample.ipynb`](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/asl/courses/machine_learning/deepdive/05_review/2_sample.ipynb) notebook to remind yourself how this was done. query = """ WITH CTE_Raw_Data AS ( SELECT weight_pounds, CAST(is_male AS STRING) AS is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND weight_pounds > 0 AND mother_age > 0 AND plurality > 0 AND gestation_weeks > 0 AND month > 0) -- Ultrasound SELECT weight_pounds, is_male, mother_age, CASE # TODO Convert plurality from integers to strings END AS plurality, gestation_weeks, hashmonth FROM CTE_Raw_Data UNION ALL -- No ultrasound SELECT weight_pounds, # TODO Mask is_male mother_age, CASE # TODO Convert plurality from integers to strings and mask plurality > 1 END AS plurality, gestation_weeks, hashmonth FROM CTE_Raw_Data """ # Create temporary BigQuery dataset # + from google.cloud import bigquery # Construct a BigQuery client object. client = bigquery.Client() # Set dataset_id to the ID of the dataset to create. dataset_name = "temp_babyweight_dataset" dataset_id = "{}.{}".format(client.project, dataset_name) # Construct a full Dataset object to send to the API. dataset = bigquery.Dataset.from_string(dataset_id) # Specify the geographic location where the dataset should reside. dataset.location = "US" # Send the dataset to the API for creation. 
# Raises google.api_core.exceptions.Conflict if the Dataset already
# exists within the project.
try:
    dataset = client.create_dataset(dataset)  # API request
    print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
except Exception:
    # Narrowed from a bare `except:` (which would also swallow
    # KeyboardInterrupt/SystemExit). The expected failure here is
    # google.api_core.exceptions.Conflict when the dataset already exists.
    print("Dataset {}.{} already exists".format(client.project, dataset.dataset_id))
# -

# Execute query and write to BigQuery table.

job_config = bigquery.QueryJobConfig()
for step in ["train", "eval"]:
    # Split on the month hash so rows fall deterministically into splits.
    # NOTE(review): the `else` branch (>= 90, a held-out test split) is
    # unreachable with the current ["train", "eval"] loop; kept for parity
    # with the Dataflow `preprocess` pipeline above.
    if step == "train":
        selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) < 80".format(query)
    elif step == "eval":
        selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 80 AND ABS(MOD(hashmonth, 100)) < 90".format(query)
    else:
        selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 90".format(query)

    # Set the destination table
    table_name = "babyweight_{}".format(step)
    table_ref = client.dataset(dataset_name).table(table_name)
    job_config.destination = table_ref
    job_config.write_disposition = "WRITE_TRUNCATE"

    # Start the query, passing in the extra configuration.
    query_job = client.query(
        query=selquery,
        # Location must match that of the dataset(s) referenced in the query
        # and of the destination table.
        location="US",
        job_config=job_config)  # API request - starts the query

    query_job.result()  # Waits for the query to finish

    print("Query results loaded to table {}".format(table_ref.path))

# Export BigQuery table to CSV in GCS.

# +
dataset_ref = client.dataset(dataset_id=dataset_name, project=PROJECT)

for step in ["train", "eval"]:
    destination_uri = "gs://{}/{}".format(BUCKET, "babyweight/bq_data/{}*.csv".format(step))
    table_name = "babyweight_{}".format(step)
    table_ref = dataset_ref.table(table_name)
    extract_job = client.extract_table(
        table_ref,
        destination_uri,
        # Location must match that of the source table.
        location="US",
    )  # API request
    extract_job.result()  # Waits for job to complete.
print("Exported {}:{}.{} to {}".format(PROJECT, dataset_name, table_name, destination_uri)) # - # ## View results # We can have a look at the elements in our bucket to see the results of our pipeline above. # !gsutil ls gs://$BUCKET/babyweight/bq_data/*000000000000* # Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
courses/machine_learning/deepdive/05_review/labs/4_preproc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Probabilistic Matrix Factorization # ## Introduction # In this notebook, we implement probabilistic matrix factorization (PMF) on movie rating dataset. # # Matrix factorization is a method to decompose a matrix into multiplication of matrices (usually two matrices). A matrix can represent a dyadic data. Matrix factorization is a way to better understand the components (dyads), and use them in further tasks. The goal is to find those unknown matrices. # # Probabilistic matrix factorization (PMF) is a probabilistic extension of matrix factorization where unknown parameters are described with their probability distributions. Similar to matrix factorization, the goal of PMF is to learn embeddings from dyadic/relational data (each matrix entry is a dyad, e.g., user-item rating, document-word count, user-user link). If the matrix has missing values, by learning decomposition based on observed entries, missing values can be pre dicted. This technique is useful in variety of applications such as recommender systems/collaborative filtering, link prediction in social network, and so on. In this project, are going to use PMF in collaborating filtering. The goal of collab orative filtering is to infer user preferences for items given a large but incomplete collection of preferences for many users. If we let each row of the original matrix represent a user, and each column represent a product, we can interpret the entries of the matrix to be the rating given to a product by a user. # # By learning latent features for users and movies using the seen ratings, we are able to predict the ratings for the unseen entries. # ### Model # For more clarification, we show the model in the context of movie ratings. 
Let $R$ be the matrix of ratings which is partially (and sparsely) filled by users' ratings for movies. Let us assume that there are $N$ users and $M$ movies in total. The goal is to find a decomposition for $R$ such that: # $$R \approx U^T V$$ # where $U$ is a $d\times N$ matrix describing users and $V$ is a $d\times M$ matrix describing movies. # # Figure below shows an illustration of this model where each column of $U$ and $V$ learn a representation for the users and movies respectively. # # <img src="matrix_fact.png",style="max-width:100%; width: 50%"> # # # Given feature vectors for $U_i$ user and $V_j$, the distribution of the corresponding rating is: # $$R(i,j) \sim N(U_i^TV_j , \sigma^2)$$ # $$U_i \sim N(0, \sigma_u^2 I) ~~~~ V_j \sim N(0, \sigma_v^2 I)$$ # # The graphical model of it is: # # <img src="graph-model.png",height: 20> # # Our objective is to find $U$ and $V$ such that they have the maximum probabillity to generate seen ratings $R$. This method is called Maximum a Posteriori (MAP) estimation. Note that $\lambda$ is a tuning parameter resulting in more stability. Let $D$ represent set of all observed data such that: $D~: ~\{(i,j) ~| ~R(i,j)~is ~given\}$. Then the objective is: # # $$\underset{U,V}{\max} ~ \log p(U,V|R) $$ # $$\underset{U,V}{\max} ~ L(U,V)=-\frac{1}{2\sigma^2} \sum_{(i,j)\in D} {(R_{ij} - U_i^{\intercal} V_j)}^2 - \frac{\lambda}{2} \sum_{i=1}^{M} {||U_i||}^2 - \frac{\lambda}{2} \sum_{j=1}^{N} {||V_j||}^2$$ # # The optimization algorithm is as follows: # # - Initialize $U^{(0)}$ and $V^{(0)}$ # - For $t = 1:T$ # - $U^{(t)}_i = {\left[ \lambda I + \sum_{j:(i,j)\in D} \frac{1}{\sigma^2} {V^{(t-1)}_j}^{\intercal}V^{(t-1)}_j \right]}^{-1} \left(\sum_{j:(i,j)\in D} \frac{1}{\sigma^2}R_{ij} ~ V^{(t-1)}_j \right)$ # - $V^{(t)}_j = {\left[ \lambda I + \sum_{i:(i,j)\in D} \frac{1}{\sigma^2} {U^{(t)}_i}^{\intercal} U^{(t)}_i \right]}^{-1} \left(\sum_{i:(i,j)\in D} \frac{1}{\sigma^2}R_{ij} ~ U^{(t)}_i \right) # $ # # - End for. 
#
#
# Now, the first step is to import the data.
#
# You need to load your data, and put it in a matrix called $R$. Make sure to convert the ratings into floats - not categorical variables. The goal for you is to complete the matrix and find the missing entries. To make calculations easier, create a new matrix with 3 columns. Put the row indices (user ids) in the first column, and column indices (movie ids) in the second column, and let the third column show the ratings.

# Here is an implementation of it for the MovieLens dataset.

# +
## Load your data here...
# example with MovieLens data:
import numpy as np
import random
import pandas as pd

# Each row of `prefer` is [user_id, movie_id, rating].
# The MovieLens u.data file is tab-separated: user, movie, rating, timestamp.
prefer = []
with open('data/u.data.txt', 'r') as o:  # `with` guarantees the file is closed
    for line in o:
        (userid, movieid, rating, ts) = line.split('\t')  # ts (timestamp) unused
        prefer.append([int(userid), int(movieid), float(rating)])
# -

# BUG FIX: `array`/`unique` were previously called unqualified, which raises
# NameError because numpy is only imported as `np`.
data = np.array(prefer)
N = len(np.unique(data[:, 0]))  # number of users
M = len(np.unique(data[:, 1]))  # number of movies
print("In this dataset, there are {} users and {} movies\objects.".format(N, M))

# +
print("there are {} given ratings total.".format(len(data)))
print("Here is the first 10 rows in my processed data")
data[:10]
# -

# Now you need to run your algorithm. Note that at each step, you need to update all of the movies' features as well as all of the users' features. You can set lambda = 0.1, and variances to be 1. Note that these parameter choices are up to you, and feel free to tune them the way you want. You can set the number of iterations to be 100.

# + active=""
#
# -

## Initialize U and V randomly. Note that U should be d by N and V should be d by M.
d = 5      # latent dimensionality of the user/movie embeddings
lam = 0.1  # regularization strength (lambda in the MAP objective)

U = np.random.normal(size=(d, N))
V = np.random.normal(size=(d, M))
print(U)
# np.random.multivariate_normal

# To make the calculations easier, you should keep track of some values at each step. Note that to update the vector for a user, you need to have access to all the movies rated by that user (and likewise, to update the vector for a movie, you need the ids of all of the users that have rated that movie).
# Since these don't change, we suggest you construct a dictionary with each user as key, and the ids of movies rated by that user as the values (as a list). You should do the same for movies.
# These are only some suggestions, feel free to use any method that you like. :)

# +
###### you can construct two dictionaries, one for the users and one for the movies, here.
## hint: you can use defaultdict
from collections import defaultdict

# u_dict maps user_id -> list of (movie_id, rating) pairs given by that user.
u_dict = defaultdict(list)
for i, j in zip(data[:, 0], zip(data[:, 1], data[:, 2])):
    u_dict[i].append(j)

# m_dict maps movie_id -> list of (user_id, rating) pairs for that movie.
m_dict = defaultdict(list)
for i, j in zip(data[:, 1], zip(data[:, 0], data[:, 2])):
    m_dict[i].append(j)

# BUG FIX: these were `print(u_list[196])` / `print(m_list[393])`,
# which are undefined names (the dictionaries are u_dict / m_dict).
print(u_dict[196])
print(m_dict[393])
# -

# Again, to update the feature vector of a user, you need to compute ${V^{(t-1)}_j}^{\intercal}V^{(t-1)}_j$. We suggest you create an array for all of the movies, and update these values at the end of each iteration. That way, you just need to look up the dictionary for the movie ids that a user has rated, and use those indices of your constructed vector.
# You need to do the same for the movies.

# +
## Constructing arrays that will make your life easier in the implementation of the algorithm ....

def outer_col_list(inp_array, col=1):
    """returns a list of outer products of cols with themselves """
    outer_list = []
    # BUG FIX: `arange`/`array` were previously unqualified (NameError).
    for i in np.arange(inp_array.shape[col]):
        _v = np.array(inp_array[:, i])
        outer_list.append(np.outer(_v.T, _v))
    return outer_list

# +
from numpy.linalg import inv

## Implementation of the algorithm ...

# make my d x d regularizer: lambda * I
# (was hard-coded `np.identity(5)`; use `d` so it tracks the latent dim)
reg = lam * np.identity(d)

def mykernel(reg, j_dict, matrix_list, matrix):
    """One block-coordinate step of the MAP update.

    For every entity (user or movie) key in j_dict, solves the regularized
    least-squares system
        [reg + sum_j x_j x_j^T]^{-1} (sum_j R_kj x_j)
    where the sums run over that entity's observed ratings, and writes the
    new feature vector into column key-1 of the result.
    """
    # BUG FIX: `zeros`/`dot` were previously unqualified (NameError).
    update = np.zeros(matrix.shape)
    for key, val in j_dict.items():
        matrix_sum = np.zeros(matrix_list[0].shape)
        vector_sum = np.zeros(matrix.shape[0])
        for i in val:
            matrix_sum = matrix_sum + matrix_list[int(i[0]) - 1]
            vector_sum = vector_sum + i[1] * matrix[:, int(i[0] - 1)]
        update[:, int(key) - 1] = np.dot(inv(reg + matrix_sum), vector_sum)
    return np.array(update)

v = V.copy()
vtv = outer_col_list(v)
for i in range(20):
    # BUG FIX: the dictionaries were swapped in the original calls.
    # Per the update equations above, the U-step iterates over users
    # (u_dict: movies each user rated, with vtv indexed by movie id),
    # and the V-step iterates over movies (m_dict, utu indexed by user id).
    u = mykernel(reg, dict(u_dict), vtv, v)
    utu = outer_col_list(u)
    print(u.shape, len(utu), utu[0].shape)
    v = mykernel(reg, dict(m_dict), utu, u)
    vtv = outer_col_list(v)
# -

print(data[:3])

# Sanity check: predicted rating = <U_user, V_movie> for a few observed pairs
# (indices are id - 1; compare against the true ratings printed above).
print(np.inner(u[:, 195].T, v[:, 241]))
print(np.inner(u[:, 185].T, v[:, 301]))
print(np.inner(u[:, 21].T, v[:, 376]))

# Now that you found the optimal U and V matrices, you can predict the missing ratings for users. For example, show 10 movies with the highest predicted ratings for 5 users of your choice.
#

# +
## e.g.: If you were a movie recommending system, what would you recommend to user number 19 to watch that hasn't watched before?
# -

# ### Reference:
# 1- <NAME>., & <NAME>. (2007). Probabilistic matrix factorization. In Advances in neural information processing systems (pp. 1257-1264). https://papers.nips.cc/paper/3208-probabilistic-matrix-factorization.pdf
#
Bootcamp-materials/notebooks/capstones/Movie-Reviews-PMF/Group-Projects/mainPMF-team2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Think Bayes # # Copyright 2018 <NAME> # # MIT License: https://opensource.org/licenses/MIT # + # Configure Jupyter so figures appear in the notebook # %matplotlib inline # Configure Jupyter to display the assigned value after an assignment # %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' import numpy as np # import classes from thinkbayes2 from thinkbayes2 import Pmf, Cdf, Suite, Joint import thinkplot # - # ## Bayesian regression # # This notebook presents a simple example of Bayesian regression using sythetic data # # ### Data # # Suppose there is a linear relationship between `x` and `y` with slope 2 and intercept 1, but the measurements of `y` are noisy; specifically, the noise is Gaussian with mean 0 and `sigma = 0.3`. slope = 2 inter = 1 sigma = 0.3 xs = np.linspace(0, 1, 6) ys = inter + slope * xs + np.random.normal(0, sigma, len(xs)) thinkplot.plot(xs, ys) thinkplot.decorate(xlabel='x', ylabel='y') # ### Grid algorithm # # We can solve the problem first using a grid algorithm, with uniform priors for slope, intercept, and sigma. # # As an exercise, fill in this likelihood function, then test it using the code below. # # Your results will depend on the random data you generated, but in general you should find that the posterior marginal distributions peak near the actual parameters. 
# + from scipy.stats import norm class Regress(Suite, Joint): def Likelihood(self, data, hypo): """ data: x, y hypo: slope, inter, sigma """ return 1 # + # Solution goes here # - params = np.linspace(-4, 4, 21) sigmas = np.linspace(0.1, 2, 20) from itertools import product hypos = product(params, params, sigmas) suite = Regress(hypos); for data in zip(xs, ys): suite.Update(data) thinkplot.Pdf(suite.Marginal(0)) thinkplot.decorate(xlabel='Slope', ylabel='PMF', title='Posterior marginal distribution') thinkplot.Pdf(suite.Marginal(1)) thinkplot.decorate(xlabel='Intercept', ylabel='PMF', title='Posterior marginal distribution') thinkplot.Pdf(suite.Marginal(2)) thinkplot.decorate(xlabel='Sigma', ylabel='PMF', title='Posterior marginal distribution') # ### MCMC # # Implement this model using MCMC. As a starting place, you can use this example from [Computational Statistics in Python](http://people.duke.edu/~ccc14/sta-663-2016/16C_PyMC3.html#Linear-regression). # # You also have the option of using the GLM module, [described here](https://docs.pymc.io/notebooks/GLM-linear.html). import pymc3 as pm pm.GLM thinkplot.plot(xs, ys) thinkplot.decorate(xlabel='x', ylabel='y') # + import pymc3 as pm with pm.Model() as model: """Fill this in""" # + # Solution goes here # + # Solution goes here # + # Solution goes here # + # Solution goes here # - # The posterior distributions for these parameters should be similar to what we got with the grid algorithm.
examples/regress.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lesson 6: pets revisited # + # %reload_ext autoreload # %autoreload 2 # %matplotlib inline from fastai.vision import * # - # Set a batch size of 64. # Untar the data at `URLs.PETS`, and set the variable `path` to the returned path with `images` appended to the end. # ## Data augmentation # Create a variable `tfms` that captures the output of the `get_transforms` function, with the following arguments: # - max_rotate=20 # - max_zoom=1.3 # - max_lighting=0.4 # - max_warp=0.4 # - p_affine=1. # - p_lighting=1. # Explain what each of these are. # Create an `ImageList` from the folder `path` split by a random 20% (using a random seed of 2). Assign it to the variable `src`. What kind of object does this return? Can you find (in code) why this is the case? # Write a function `get_data` that takes `src` and labels it using the regex `([^/]+)_\d+.jpg$`, transforms it with `tfms`, takes `size` as an argument, takes `padding_mode` as an argument that defaults to `reflection`, creates a databunch with batch size `bs`, and normalizes using the imagenet stats. What type would you expect this to return? # Create a variable `data` that calls `get_data` with size 224, `bs=bs`, and padding type `zeros`. # Write a function `_plot` that plots the fourth image in the training dataset. Pass it to `plot_multi` to create a 3x3 grid of augmented images. # Create a new variable `data` with size 224 and the same bs. # data = get_data(224,bs) # Use the same process to plot a 3x3 grid of 8x8 images of augmented data. This time allow the default padding mode. # ## Train a model # Call `gc.collect`. Can you explain what this does? # Create a `cnn_learner` named `learn` with data `data`, architecture resnet34, using the `error_rate` metric, and `bn_final` set to true. 
# Fit a cycle with 3 epochs, a slice up to 1e-2, with `pct_start=0.8`. Can you explain what `pct_start` does? # Unfreeze the neural net. Fit another cycle with two epochs under the slice (1e-6, 1e-3). Same pct_start. # Create a new `data` object with size 352. # Train for another cycle with 2 epochs, this time with a `max_lr` of `slice(1e-6, 1e-4)`. # Save the model under the name `352`. # ## Convolution kernel # Create another new `data` with size 352 and batch size 16. # Create a new learner `learn` with the same specs as earlier, and load the weights from `352` to it. # Set the variable `idx=0`. Set the values returned at position `idx` within the valid_ds and to `x` and `y`. # Call the `show` method on x. # Return the item at position `idx` in the `y` part of the `valid_ds`. # This is created for you, because it doesn't teach much. Maybe dig into the `expand` method. # Return the shape of `k`. k.shape # Get the `x` value of the first item in `valid_ds`, get the `data` property and set it to `t`. What does the data property represent? # Add a new dimension to `t` using the `None` index syntax. # Create an image called `edge` by convolving `t` with our filter `k`. # Run `show_image` over `edge`. Hint: you'll have to get the zeroth index of `edge` -- why? # Show the number of classes in `data`. # Print the model. # Print a model summary. # ## Heatmap # Get the model out of our learner and set it to `eval` mode. # Get one item from the `x` data you created above. Call this `xb`. Hint: `one_item` returns a tuple, but we only need the first thing. # Create an image from a denormed version of xb. Again, you'll have to index into this. Be sure you can explain why. Call the output `xb_im`. # Put the `xb` variable on the GPU by calling `cuda()`. # Import fastai.callbacks.hooks. # Create a function `hooked_backward` that returns two objects `grad_a` and `grad_g` representing the activations and the gradients. 
Make sure to use `with` statements here so that the hooks are removed after we get our results. # Create two objects, `hook_a` and `hook_g` with the outputs of `hooked_backward`. # Assign the stored activation outputs to a variable called `acts`. Make sure to call `.cpu` to put this back on the CPU. # Take an average over the channel dimension to get a 2d shape. Print out the shape. # Write a function `show_heatmap` that does the following: # - takes an argument hm # - Creates a new matplotlib axis using `plt.subplots` # - shows `xb_im` on the new axis # - calls `ax.imshow` with arguments `alpha=0.6`, `extent=(0,352,352,0)`, `interpolation=bilinear`, `cmap=magma`. Look up what these mean. # Call `show_heatmap` on `avg_acts`. # ## Grad-CAM # Paper: [Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization](https://arxiv.org/abs/1610.02391) grad = hook_g.stored[0][0].cpu() grad_chan = grad.mean(1).mean(1) grad.shape,grad_chan.shape mult = (acts*grad_chan[...,None,None]).mean(0) show_heatmap(mult) fn = path/'../other/bulldog_maine.jpg' #Replace with your own image x = open_image(fn); x xb,_ = data.one_item(x) xb_im = Image(data.denorm(xb)[0]) xb = xb.cuda() hook_a,hook_g = hooked_backward() # + acts = hook_a.stored[0].cpu() grad = hook_g.stored[0][0].cpu() grad_chan = grad.mean(1).mean(1) mult = (acts*grad_chan[...,None,None]).mean(0) # - show_heatmap(mult) data.classes[0] hook_a,hook_g = hooked_backward(0) # + acts = hook_a.stored[0].cpu() grad = hook_g.stored[0][0].cpu() grad_chan = grad.mean(1).mean(1) mult = (acts*grad_chan[...,None,None]).mean(0) # - show_heatmap(mult) # ## fin
nbs/dl1/lesson6-pets-more-ex.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Notebook 3: Bayesian Statistics
#
# [Bayesian Decision Analysis](https://allendowney.github.io/BayesianDecisionAnalysis/)
#
# Copyright 2021 <NAME>
#
# License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# ## Review
#
# In the previous notebook we extended the cookie problem with more bowls and I introduced an alternative to the Bayes table, a probability mass function (PMF), which is a useful way to represent and do computations with distributions.
#
# Here's the function I used to create a `Pmf`, given a sequence of quantities, `qs`, and the corresponding probabilities, `ps`.


def make_pmf(qs, ps, **options):
    """Build a Series that represents a PMF.

    qs: sequence of quantities (becomes the Series index)
    ps: sequence of probabilities (becomes the Series values)
    options: extra keyword arguments forwarded to the Series constructor

    returns: Pandas Series mapping each quantity to its probability
    """
    return pd.Series(ps, index=qs, **options)


# And here's the function that performs a Bayesian update, given a sequence of likelihoods:


def bayes_update(pmf, likelihood):
    """Perform a Bayesian update in place.

    pmf: Series that represents the prior; modified in place
    likelihood: sequence of likelihoods, aligned with the index of `pmf`
    """
    # Multiply the prior by the likelihood, then renormalize so the
    # posterior probabilities add up to 1.
    pmf *= likelihood
    pmf /= pmf.sum()


# We'll use these functions to solve a new problem similar to the cookie problem.

# ## The Euro problem
#
# Here's a problem from <NAME>ay's book, [*Information Theory, Inference, and Learning Algorithms*](http://www.inference.org.uk/mackay/itila/p0.html), which is the book where I first learned about Bayesian statistics.
MacKay writes: # # > A statistical statement appeared in The Guardian on # Friday January 4, 2002: # > # > >"When spun on edge 250 times, a Belgian one-euro coin came # up heads 140 times and tails 110. ‘It looks very suspicious # to me’, said <NAME>, a statistics lecturer at the London # School of Economics. ‘If the coin were unbiased the chance of # getting a result as extreme as that would be less than 7%’." # > # > But [asks MacKay] do these data give evidence that the coin is biased rather than fair? # # To answer this question, we have to make some modeling choices. # # * First, let's assume that if you spin a coin on edge, there is some probability that it will land heads up. I'll call that probability $x$. # # * Second, let's assume that $x$ varies from one coin to the next, depending on how the coin is balanced and maybe some other factors. # With these assumptions we can formulate MacKay's question as an inference problem: given the data --- 140 heads and 110 tails --- what do we think $x$ is for this coin? # # This formulation is similar to the 101 Bowls problem we saw in the previous notebook; in fact, we will use the same likelihoods. # # But in the 101 Bowls problem, we are told that we choose a bowl at random, which implies that all bowls have the same prior probability. # # For the Euro problem, we have to think harder. What values of $x$ do you think are reasonable? # # It seems likely that many coins are "fair", meaning that the probability of heads is close to 50%. Do you think there are coins where $x$ is 75%? How about 90%? # # To be honest, I don't really know. To get started, I will assume that all values of $x$, from 0% to 100%, are equally likely. Then we'll come back and try another prior. # # Here's a uniform prior from 0 to 100. 
xs = np.arange(101)
prior = 1/101
pmf = make_pmf(xs, prior)

# Here are the likelihoods for heads and tails:

likelihood_heads = xs / 100
likelihood_tails = 1 - xs / 100

# And here are the updates for 140 heads and 110 tails.

# Here's what the results look like:

# +
pmf.plot()

plt.xlabel('Possible values of x')
plt.ylabel('Probability')
plt.title('140 heads, 110 tails');
# -

# This curve shows the "posterior distribution" of $x$.

# ## Put it in a function
#
# Before we go on, let's put that update in a function, because we are going to need it again.


def bayes_update_euro(pmf, data):
    """Update a prior PMF with the outcome of a series of coin spins.

    pmf: Series that represents a prior PMF; updated in place
    data: tuple with the number of heads and the number of tails
    """
    heads, tails = data
    xs = pmf.index

    # Likelihood of each outcome, for every hypothetical value of x.
    likelihood_heads = xs / 100
    likelihood_tails = 1 - likelihood_heads

    # Apply one Bayesian update per observed spin.
    for _ in range(heads):
        bayes_update(pmf, likelihood_heads)
    for _ in range(tails):
        bayes_update(pmf, likelihood_tails)


# This function takes a PMF that represents the prior, and a tuple that contains the number of heads and tails.
#
# Here's the uniform prior again.

xs = np.arange(101)
prior = 1/101
uniform = make_pmf(xs, prior)

# Here's the update.

data = 140, 110
bayes_update_euro(uniform, data)

# And here are the results again.

# +
uniform.plot()

plt.xlabel('Possible values of x')
plt.ylabel('Probability')
plt.title('140 heads, 110 tails');
# -

# ## A better prior
#
# Remember that this result is based on a uniform prior, which assumes that any value of $x$ from 0 to 100 is equally likely.
#
# Given what we know about coins, that's probably not true. I can believe that if you spin a lop-sided coin on edge, it might be somewhat more likely to land on heads or tails.
#
# But unless the coin is heavily weighted on one side, I would be surprised if $x$ were greater than 60% or less than 40%.
#
# Of course, I could be wrong, but in general I would expect to find $x$ closer to 50%, and I would be surprised to find it near 0% or 100%.
# # I can represent that prior believe with a triangle-shaped prior. # # Here's an array that ramps up from 0 to 49 and ramps down from 50 to 0. # I'll put it in a PMF and normalize it so it adds up to 1. # Here's what the triangle prior looks like. # + triangle.plot(color='C1') plt.xlabel('Possible values of x') plt.ylabel('Probability') plt.title('Triangle prior'); # - # Now let's update it with the data. # And plot the results, along with the posterior based on a uniform prior. # + uniform.plot(label='Uniform') triangle.plot(label='Triangle') plt.xlabel('Possible values of x') plt.ylabel('Probability') plt.title('140 heads, 110 tails') plt.legend(); # - # The posterior distributions are almost identical because, in this case, we have enough data to "swamp the prior"; that is, the posteriors depend strongly on the data and only weakly on the priors. # # This is good news, because it suggests that we can use data to resolve arguments. Suppose two people disagree about the correct prior. If neither can persuade the other, they might have to agree to disagree. # # But if they get new data, and each of them does a Bayesian update, they will usually find their beliefs converging. # # And with enough data, the remaining difference can be so small that it makes no difference in practice. # ## Summarizing the posterior distribution # # The posterior distribution contains all of the information we have about the value of $x$. But sometimes we want to summarize this information. # # We have already seen one way to summarize a posterior distribution, the Maximum Aposteori Probability, or MAP: uniform.idxmax() # `idxmax` returns the value of $x$ with the highest probability. # # In this example, we get the same MAP with the triangle prior: triangle.idxmax() # Another way to summarize the posterior distribution is the posterior mean. 
# # Given a set of quantities, $q_i$, and the corresponding probabilities, $p_i$, the mean of the distribution is: # # $\sum_i q_i p_i$ # # The following function takes a Pmf and computes its mean. Note that this function only works correctly if the Pmf is normalized. # Here's the posterior mean based on the uniform prior: pmf_mean(uniform) # And here's the posterior mean with the triangle prior: pmf_mean(triangle) # The posterior means are not identical, but they are close enough that the difference probably doesn't matter. # # In this example, the posterior mean is very close to the MAP. That's true when the posterior distribution is symmetric, but it is not always true. # # If someone asks what we think $x$ is, the MAP or the posterior mean might be a good answer. # ## Posterior probability # # If the coin is "fair", that means that $x$ is 50%. So it might be tempting to use the posterior PMF to compute the probability that $x$ is 50%: uniform[50] # The result is the posterior probability that $x$ is 50%, but it is not the probability that the coin is fair. # # The problem is that $x$ is really a continuous quantity, which means it could have any value between 0 and 1. # # For purposes of computation, I broke this interval into 101 discrete values, but that was an arbitrary choice. I could have done the computation with 201 hypotheses, like this: # + xs2 = np.linspace(0, 100, 201) prior2 = 1/201 uniform2 = make_pmf(xs2, prior2) len(uniform2) # - # Here's the update. bayes_update_euro(uniform2, data) # And here's what the results look like. # + uniform2.plot(color='C2') plt.xlabel('201 possible values of x') plt.ylabel('Probability') plt.title('140 heads, 110 tails'); # - # The results are visually similar, but you might notice that the curve is a little smoother at the peak. 
#
# The MAPs are the same and the posterior means are almost the same:

uniform.idxmax(), uniform2.idxmax()

pmf_mean(uniform), pmf_mean(uniform2)

# But the total probability is spread out over twice as many hypotheses, so the probability of any single hypothesis is smaller.
#
# If we use both posteriors to compute the probability that $x$ is 50%, we get very different results.

uniform[50], uniform2[50]

# Because $x$ is continuous, we divided the interval into discrete values. But the number of values was an arbitrary choice, so the probability of any single value is not meaningful.
#
# However, we can meaningfully compute the probability that $x$ falls in an interval.

# ## Credible intervals
#
# The following function takes a Pmf and an interval from `low` to `high`; it computes the total probability of all quantities in the interval (excluding `low` and including `high`).


def prob_between(pmf, low, high):
    """Return the total probability of the quantities in (low, high].

    pmf: Series that represents a PMF
    low: lower bound of the interval, excluded
    high: upper bound of the interval, included
    """
    between = (low < pmf.index) & (pmf.index <= high)
    total = pmf[between].sum()
    return total


# We can use it to compute the probability that $x$ is between 50 and 60, based on the uniform prior with 201 values.

prob_between(uniform2, 50, 60)

# The probability that $x$ is between 50 and 60 is about 88%.
#
# An interval like this is called a "credible interval" because it tells us how credible it is that $x$ falls in the interval.

# In this example, I chose the quantities, 50 and 60, and computed the posterior probability of the values between them.
#
# We can also go the other way: given a probability like 88%, we could find two quantities that have that much probability between them.

# To make that work in general, we have to do some interpolation, which is what the following function does.

# +
from scipy.interpolate import interp1d


def credible_interval(pmf, prob):
    """Compute a credible interval of a PMF.

    Finds the pair of quantities that bound the central interval
    containing the given probability mass.

    pmf: Series representing a PMF
    prob: probability that the interval should contain

    return: pair of float, the low and high ends of the interval
    """
    # make the CDF
    xs = pmf.index
    ys = pmf.cumsum()

    # compute the probabilities of the two tails outside the interval
    p = (1-prob)/2
    ps = [p, 1-p]

    # interpolate the inverse CDF; clamp out-of-range probabilities
    # to the extreme quantities instead of raising
    options = dict(bounds_error=False,
                   fill_value=(xs[0], xs[-1]),
                   assume_sorted=True)
    interp = interp1d(ys, xs, **options)
    return interp(ps)
# -

# The details of this function are not important right now, but we can confirm that it works as expected:

credible_interval(uniform, 0.88)

# ## Summary
#
# In this lesson, we used data from a coin-spinning experiment to estimate the probability that a given coin lands on heads.
#
# We tried three different priors: uniform distributions with 101 and 201 values, and a triangle distribution. The results are similar, which indicates that we have enough data to "swamp the priors".
#
# And we summarized the posterior distributions three ways, computing the value with Maximum a Posteriori Probability (MAP), the posterior mean, and a credible interval.

# ## Exercise
#
# Suppose a rookie baseball player gets 3 hits out of 3 at bats during their first game. What do we think their long-term batting average will be?
#
# To answer this question, we have to make some modeling choices:
#
# * Let's assume that each player has some constant batting average that is their probability of getting a hit during any at bat.
#
# * As a prior distribution, let's use a normal distribution with mean 0.260 and standard deviation 0.033.
#
# We can use `scipy.stats.norm` to evaluate the normal distribution for a range of batting averages, like this:

# +
from scipy.stats import norm

mean = 0.26
std = 0.033

xs = np.linspace(0, 0.5, 101)
ps = norm(mean, std).pdf(xs)
# -

# We can put these quantities and their probabilities in a Pmf, like this:

prior = make_pmf(xs, ps)
prior /= prior.sum()

# Here's what the prior distribution of batting averages looks like.
# + prior.plot(color='gray', label='prior') plt.xlabel('Batting average') plt.ylabel('Probability') plt.title('Distribution of batting averages') plt.legend(); # - # 1. Compute the likelihood of getting 3 hits. # # 2. Compute the posterior distribution for this player's batting average. # # 3. Plot the prior and posterior distributions. # # 4. Compute the prior and posterior means; how much higher is the posterior mean?
03_euro.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Generation of tables and figures of MRIQC paper
#
# This notebook is associated to the paper:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>; MRIQC: Predicting Quality in Manual MRI Assessment Protocols Using No-Reference Image Quality Measures; bioRxiv 111294; doi:[10.1101/111294](https://doi.org/10.1101/111294).

# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import os.path as op
import numpy as np
import pandas as pd
from pkg_resources import resource_filename as pkgrf

from mriqc.viz import misc as mviz
from mriqc.classifier.data import read_dataset, combine_datasets

# Where the outputs should be saved
outputs_path = '../../mriqc-data/'

# Path to ABIDE's BIDS structure
abide_path = '/home/oesteban/Data/ABIDE/'

# Path to DS030's BIDS structure
ds030_path = '/home/oesteban/Data/ds030/'
# -

# Read some data (from mriqc package)

# +
x_path = pkgrf('mriqc', 'data/csv/x_abide.csv')
y_path = pkgrf('mriqc', 'data/csv/y_abide.csv')
ds030_x_path = pkgrf('mriqc', 'data/csv/x_ds030.csv')
ds030_y_path = pkgrf('mriqc', 'data/csv/y_ds030.csv')

rater_types = {'rater_1': float, 'rater_2': float, 'rater_3': float}
mdata = pd.read_csv(y_path, index_col=False, dtype=rater_types)

sites = list(sorted(list(set(mdata.site.values.ravel().tolist()))))
# -

# ## Figure 1: artifacts in MRI

# Shows a couple of subpar datasets from the ABIDE dataset

out_file = op.join(outputs_path, 'figures', 'fig01-artifacts.svg')
mviz.figure1(
    op.join(abide_path, 'sub-50137', 'anat', 'sub-50137_T1w.nii.gz'),
    op.join(abide_path, 'sub-50110', 'anat', 'sub-50110_T1w.nii.gz'),
    out_file)

# BUGFIX: strip the '.svg' suffix before appending '.pdf'
# (the original `out_file[:4]` kept only the first four characters
# of the path, producing a broken output filename).
out_file_pdf = out_file[:-4] + '.pdf'
# !rsvg-convert -f pdf -o $out_file_pdf $out_file

# ## Figure 2: batch effects
#
# This code was used to generate the second figure

# + from
mriqc.classifier.sklearn import preprocessing as mcsp # Concatenate ABIDE & DS030 fulldata = combine_datasets([ (x_path, y_path, 'ABIDE'), (ds030_x_path, ds030_y_path, 'DS030'), ]) # Names of all features features =[ 'cjv', 'cnr', 'efc', 'fber', 'fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z', 'icvs_csf', 'icvs_gm', 'icvs_wm', 'inu_med', 'inu_range', 'qi_1', 'qi_2', 'rpve_csf', 'rpve_gm', 'rpve_wm', 'size_x', 'size_y', 'size_z', 'snr_csf', 'snr_gm', 'snr_total', 'snr_wm', 'snrd_csf', 'snrd_gm', 'snrd_total', 'snrd_wm', 'spacing_x', 'spacing_y', 'spacing_z', 'summary_bg_k', 'summary_bg_mad', 'summary_bg_mean', 'summary_bg_median', 'summary_bg_n', 'summary_bg_p05', 'summary_bg_p95', 'summary_bg_stdv', 'summary_csf_k', 'summary_csf_mad', 'summary_csf_mean', 'summary_csf_median', 'summary_csf_n', 'summary_csf_p05', 'summary_csf_p95', 'summary_csf_stdv', 'summary_gm_k', 'summary_gm_mad', 'summary_gm_mean', 'summary_gm_median', 'summary_gm_n', 'summary_gm_p05', 'summary_gm_p95', 'summary_gm_stdv', 'summary_wm_k', 'summary_wm_mad', 'summary_wm_mean', 'summary_wm_median', 'summary_wm_n', 'summary_wm_p05', 'summary_wm_p95', 'summary_wm_stdv', 'tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm', 'wm2max' ] # Names of features that can be normalized coi = [ 'cjv', 'cnr', 'efc', 'fber', 'fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z', 'snr_csf', 'snr_gm', 'snr_total', 'snr_wm', 'snrd_csf', 'snrd_gm', 'snrd_total', 'snrd_wm', 'summary_csf_mad', 'summary_csf_mean', 'summary_csf_median', 'summary_csf_p05', 'summary_csf_p95', 'summary_csf_stdv', 'summary_gm_k', 'summary_gm_mad', 'summary_gm_mean', 'summary_gm_median', 'summary_gm_p05', 'summary_gm_p95', 'summary_gm_stdv', 'summary_wm_k', 'summary_wm_mad', 'summary_wm_mean', 'summary_wm_median', 'summary_wm_p05', 'summary_wm_p95', 'summary_wm_stdv' ] # Plot batches fig = mviz.plot_batches(fulldata, cols=list(reversed(coi)), out_file=op.join(outputs_path, 'figures/fig02-batches-a.pdf')) # Apply new site-wise scaler scaler = 
mcsp.BatchRobustScaler(by='site', columns=coi) scaled = scaler.fit_transform(fulldata) fig = mviz.plot_batches(scaled, cols=coi, site_labels='right', out_file=op.join(outputs_path, 'figures/fig02-batches-b.pdf')) # - # ## Figure 3: Inter-rater variability # # In this figure we evaluate the inter-observer agreement between both raters on the 100 data points overlapping of ABIDE. Also the Cohen's Kappa is computed. # + from sklearn.metrics import cohen_kappa_score overlap = mdata[np.all(~np.isnan(mdata[['rater_1', 'rater_2']]), axis=1)] y1 = overlap.rater_1.values.ravel().tolist() y2 = overlap.rater_2.values.ravel().tolist() fig = mviz.inter_rater_variability(y1, y2, out_file=op.join(outputs_path, 'figures', 'fig02-irv.pdf')) print("Cohen's Kappa %f" % cohen_kappa_score(y1, y2)) y1 = overlap.rater_1.values.ravel() y1[y1 == 0] = 1 y2 = overlap.rater_2.values.ravel() y2[y2 == 0] = 1 print("Cohen's Kappa (binarized): %f" % cohen_kappa_score(y1, y2)) # - # ## Figure 5: Model selection # + import matplotlib.pyplot as plt import seaborn as sn rfc_acc=[0.842, 0.815, 0.648, 0.609, 0.789, 0.761, 0.893, 0.833, 0.842, 0.767, 0.806, 0.850, 0.878, 0.798, 0.559, 0.881, 0.375] svc_lin_acc=[0.947, 0.667, 0.870, 0.734, 0.754, 0.701, 0.750, 0.639, 0.877, 0.767, 0.500, 0.475, 0.837, 0.768, 0.717, 0.050, 0.429] svc_rbf_acc=[0.947, 0.852, 0.500, 0.578, 0.772, 0.712, 0.821, 0.583, 0.912, 0.767, 0.500, 0.450, 0.837, 0.778, 0.441, 0.950, 0.339] df = pd.DataFrame({ 'site': list(range(len(sites))) * 3, 'accuracy': rfc_acc + svc_lin_acc + svc_rbf_acc, 'Model': ['RFC'] * len(sites) + ['SVC_lin'] * len(sites) + ['SVC_rbf'] * len(sites) }) x = np.arange(len(sites)) data = list(zip(rfc_acc, svc_lin_acc, svc_rbf_acc)) dim = len(data[0]) w = 0.81 dimw = w / dim colors = ['dodgerblue', 'orange', 'darkorange'] allvals = [rfc_acc, svc_lin_acc, svc_rbf_acc] fig = plt.figure(figsize=(10, 3)) ax2 = plt.subplot2grid((1, 4), (0, 3)) plot = sn.violinplot(data=df, x='Model', y="accuracy", ax=ax2, 
palette=colors, bw=.1, linewidth=.7) for i in range(dim): ax2.axhline(np.average(allvals[i]), ls='--', color=colors[i], lw=.8) # ax2.axhline(np.percentile(allvals[i], 50), ls='--', color=colors[i], lw=.8) # sn.swarmplot(x="model", y="accuracy", data=df, color="w", alpha=.5, ax=ax2); ax2.yaxis.tick_right() ax2.set_ylabel('') ax2.set_xticklabels(ax2.get_xticklabels(), rotation=40) ax2.set_ylim([0.0, 1.0]) ax1 = plt.subplot2grid((1, 4), (0, 0), colspan=3) for i in range(dim): y = [d[i] for d in data] b = ax1.bar(x + i * dimw, y, dimw, bottom=0.001, color=colors[i], alpha=.6) print(np.average(allvals[i]), np.std(allvals[i])) ax1.axhline(np.average(allvals[i]), ls='--', color=colors[i], lw=.8) plt.xlim([-0.2, 16.75]) plt.grid(False) _ = plt.xticks(np.arange(0, 17) + 0.33, sites, rotation='vertical') ax1.set_ylim([0.0, 1.0]) ax1.set_ylabel('Accuracy (ACC)') fig.savefig(op.join(outputs_path, 'figures/fig05-acc.pdf'), bbox_inches='tight', dpi=300) # + rfc_roc_auc=[0.597, 0.380, 0.857, 0.610, 0.698, 0.692, 0.963, 0.898, 0.772, 0.596, 0.873, 0.729, 0.784, 0.860, 0.751, 0.900, 0.489] svc_lin_roc_auc=[0.583, 0.304, 0.943, 0.668, 0.691, 0.754, 1.000, 0.778, 0.847, 0.590, 0.857, 0.604, 0.604, 0.838, 0.447, 0.650, 0.501] svc_rbf_roc_auc=[0.681, 0.217, 0.827, 0.553, 0.738, 0.616, 0.889, 0.813, 0.845, 0.658, 0.779, 0.493, 0.726, 0.510, 0.544, 0.500, 0.447] df = pd.DataFrame({ 'site': list(range(len(sites))) * 3, 'auc': rfc_roc_auc + svc_lin_roc_auc + svc_rbf_roc_auc, 'Model': ['RFC'] * len(sites) + ['SVC_lin'] * len(sites) + ['SVC_rbf'] * len(sites) }) x = np.arange(len(sites)) data = list(zip(rfc_roc_auc, svc_lin_roc_auc, svc_rbf_roc_auc)) dim = len(data[0]) w = 0.81 dimw = w / dim colors = ['dodgerblue', 'orange', 'darkorange'] allvals = [rfc_roc_auc, svc_lin_roc_auc, svc_rbf_roc_auc] fig = plt.figure(figsize=(10, 3)) ax2 = plt.subplot2grid((1, 4), (0, 3)) plot = sn.violinplot(data=df, x='Model', y="auc", ax=ax2, palette=colors, bw=.1, linewidth=.7) for i in range(dim): 
ax2.axhline(np.average(allvals[i]), ls='--', color=colors[i], lw=.8) ax2.yaxis.tick_right() ax2.set_ylabel('') ax2.set_xticklabels(ax2.get_xticklabels(), rotation=40) ax2.set_ylim([0.0, 1.0]) ax1 = plt.subplot2grid((1, 4), (0, 0), colspan=3) for i in range(dim): y = [d[i] for d in data] b = ax1.bar(x + i * dimw, y, dimw, bottom=0.001, color=colors[i], alpha=.6) print(np.average(allvals[i]), np.std(allvals[i])) ax1.axhline(np.average(allvals[i]), ls='--', color=colors[i], lw=.8) plt.xlim([-0.2, 16.75]) plt.grid(False) _ = plt.xticks(np.arange(0, 17) + 0.33, sites, rotation='vertical') ax1.set_ylim([0.0, 1.0]) ax1.set_ylabel('Area under the curve (AUC)') fig.savefig(op.join(outputs_path, 'figures/fig05-auc.pdf'), bbox_inches='tight', dpi=300) # - # ## Evaluation on DS030 # # This section deals with the results obtained on DS030. # # ### Table 4: Confusion matrix # + from sklearn.metrics import confusion_matrix pred_file = op.abspath(op.join( '..', 'mriqc/data/csv', 'mclf_run-20170724-191452_mod-rfc_ver-0.9.7-rc8_class-2_cv-loso_data-test_pred.csv')) pred_y = pd.read_csv(pred_file) true_y = pd.read_csv(ds030_y_path) true_y.rater_1 *= -1 true_y.rater_1[true_y.rater_1 < 0] = 0 print(confusion_matrix(true_y.rater_1.tolist(), pred_y.pred_y.values.ravel().tolist(), labels=[0, 1])) # - # ### Figure 6A: Feature importances # + import seaborn as sn from sklearn.externals.joblib import load as loadpkl sn.set_style("white") # Get the RFC estimator = loadpkl(pkgrf('mriqc', 'data/mclf_run-20170724-191452_mod-rfc_ver-0.9.7-rc8_class-2_cv-loso_data-train_estimator.pklz')) forest = estimator.named_steps['rfc'] # Features selected in cross-validation features = [ "cjv", "cnr", "efc", "fber", "fwhm_avg", "fwhm_x", "fwhm_y", "fwhm_z", "icvs_csf", "icvs_gm", "icvs_wm", "qi_1", "qi_2", "rpve_csf", "rpve_gm", "rpve_wm", "snr_csf", "snr_gm", "snr_total", "snr_wm", "snrd_csf", "snrd_gm", "snrd_total", "snrd_wm", "summary_bg_k", "summary_bg_stdv", "summary_csf_k", "summary_csf_mad", 
"summary_csf_mean", "summary_csf_median", "summary_csf_p05", "summary_csf_p95", "summary_csf_stdv", "summary_gm_k", "summary_gm_mad", "summary_gm_mean", "summary_gm_median", "summary_gm_p05", "summary_gm_p95", "summary_gm_stdv", "summary_wm_k", "summary_wm_mad", "summary_wm_mean", "summary_wm_median", "summary_wm_p05", "summary_wm_p95", "summary_wm_stdv", "tpm_overlap_csf", "tpm_overlap_gm", "tpm_overlap_wm"] nft = len(features) forest = estimator.named_steps['rfc'] importances = np.median([tree.feature_importances_ for tree in forest.estimators_], axis=0) # importances = np.median(, axis=0) indices = np.argsort(importances)[::-1] df = {'Feature': [], 'Importance': []} for tree in forest.estimators_: for i in indices: df['Feature'] += [features[i]] df['Importance'] += [tree.feature_importances_[i]] fig = plt.figure(figsize=(20, 6)) # plt.title("Feature importance plot") sn.boxplot(x='Feature', y='Importance', data=pd.DataFrame(df), linewidth=1, notch=True) plt.xlabel('Features selected (%d)' % len(features)) # plt.bar(range(nft), importances[indices], # color="r", yerr=std[indices], align="center") plt.xticks(range(nft)) plt.gca().set_xticklabels([features[i] for i in indices], rotation=90) plt.xlim([-1, nft]) plt.show() fig.savefig(op.join(outputs_path, 'figures', 'fig06-exp2-fi.pdf'), bbox_inches='tight', pad_inches=0, dpi=300) # - # ### Figure 6B: Misclassified images of DS030 fn = ['10225', '10235', '10316', '10339', '10365', '10376', '10429', '10460', '10506', '10527', '10530', '10624', '10696', '10891', '10948', '10968', '10977', '11050', '11052', '11142', '11143', '11149', '50004', '50005', '50008', '50010', '50016', '50027', '50029', '50033', '50034', '50036', '50043', '50047', '50049', '50053', '50054', '50055', '50085', '60006', '60010', '60012', '60014', '60016', '60021', '60046', '60052', '60072', '60073', '60084', '60087', '70051', '70060', '70072'] fp = ['10280', '10455', '10523', '11112', '50020', '50048', '50052', '50061', '50073', '60077'] fn_clear 
= [ ('10316', 98), ('10968', 122), ('11050', 110), ('11149', 111) ] import matplotlib.pyplot as plt from mriqc.viz.utils import plot_slice import nibabel as nb for im, z in fn_clear: image_path = op.join(ds030_path, 'sub-%s' % im, 'anat', 'sub-%s_T1w.nii.gz' % im) imdata = nb.load(image_path).get_data() fig, ax = plt.subplots() plot_slice(imdata[..., z], annotate=True) fig.savefig(op.join(outputs_path, 'figures', 'fig-06_sub-%s_slice-%03d.svg' % (im, z)), dpi=300, bbox_inches='tight') plt.clf() plt.close() fp_clear = [ ('10455', 140), ('50073', 162), ] for im, z in fp_clear: image_path = op.join(ds030_path, 'sub-%s' % im, 'anat', 'sub-%s_T1w.nii.gz' % im) imdata = nb.load(image_path).get_data() fig, ax = plt.subplots() plot_slice(imdata[..., z], annotate=True) fig.savefig(op.join(outputs_path, 'figures', 'fig-06_sub-%s_slice-%03d.svg' % (im, z)), dpi=300, bbox_inches='tight') plt.clf() plt.close()
notebooks/Paper-v2.0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # **Amazon Lookout for Equipment** - Demonstration on an anonymized expander dataset # *Part 1: Data preparation* # # **Change your bucket name here:** BUCKET = '<YOUR_BUCKET_NAME_HERE>' PREFIX = 'data' # ## Initialization # --- # This repository is initially structured as follow: # ``` # /lookout-equipment-demo # | # +-- dataset/ <<< Original dataset <<< # | |-- labels.csv # | |-- tags_description.csv # | |-- timeranges.txt # | \-- timeseries.zip # | # +-- notebooks/ # | |-- 1_data_preparation.ipynb <<< This notebook <<< # | |-- 2_dataset_creation.ipynb # | |-- 3_model_training.ipynb # | |-- 4_model_evaluation.ipynb # | \-- 5_inference_scheduling.ipynb # | # +-- utils/ # |-- lookout_equipment_utils.py # \-- lookoutequipment.json # ``` # # ### Imports # + language="sh" # pip --quiet install --upgrade pip # pip --quiet install --upgrade awscli boto3 tsia # - from IPython.core.display import HTML HTML("<script>Jupyter.notebook.kernel.restart()</script>") # + import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pyarrow as pa import pyarrow.parquet as pq import shutil import sys import tsia import warnings from tqdm import tqdm # - sys.path.append('../utils') import lookout_equipment_utils as lookout # ### Parameters # + RAW_DATA = os.path.join('..', 'dataset') DATA = os.path.join('..', 'data') LABEL_DATA = os.path.join(DATA, 'labelled-data') TRAIN_DATA = os.path.join(DATA, 'training-data', 'expander') os.makedirs(DATA, exist_ok=True) os.makedirs(LABEL_DATA, exist_ok=True) os.makedirs(TRAIN_DATA, exist_ok=True) # - # %matplotlib inline plt.style.use('Solarize_Light2') plt.rcParams['lines.linewidth'] = 0.5 warnings.filterwarnings("ignore") # ## Loading datasets of interest # --- # ### Analysis time ranges 
# The dataset provided with this repository is one year long with some known anomaly periods appearing both at the beginning and at the end of the year. Using the following training / evaluation split, will allow Lookout for Equipment to have labelled periods on both side of the split date: # + timeranges_fname = os.path.join(DATA, 'timeranges.txt') shutil.copyfile(os.path.join(RAW_DATA, 'timeranges.txt'), timeranges_fname) with open(timeranges_fname, 'r') as f: timeranges = f.readlines() training_start = pd.to_datetime(timeranges[0][:-1]) training_end = pd.to_datetime(timeranges[1][:-1]) evaluation_start = pd.to_datetime(timeranges[2][:-1]) evaluation_end = pd.to_datetime(timeranges[3][:-1]) print(f'Training period: from {training_start} to {training_end}') print(f'Evaluation period: from {evaluation_start} to {evaluation_end}') # - # ### Labels # Historical maintenance record time ranges are recorded in a CSV files with two columns containing *start time* and *end time* of each range: labels_fname = os.path.join(LABEL_DATA, 'labels.csv') shutil.copyfile(os.path.join(RAW_DATA, 'labels.csv'), labels_fname) labels_df = pd.read_csv(os.path.join(LABEL_DATA, 'labels.csv'), header=None) labels_df[0] = pd.to_datetime(labels_df[0]) labels_df[1] = pd.to_datetime(labels_df[1]) labels_df.columns = ['start', 'end'] labels_df.head() # ### Time series # The raw timeseries is a zipped parquet file, let's deflate it: timeseries_fname = os.path.join(RAW_DATA, 'timeseries.zip') # !unzip -o $timeseries_fname -d $DATA/training-data # The dataframe stored there has 122 tags and 480,886 rows, ranging from *January 1st* to *November 30, 2015*: # + all_tags_fname = os.path.join(DATA, 'training-data', 'expander.parquet') table = pq.read_table(all_tags_fname) all_tags_df = table.to_pandas() del table print(all_tags_df.shape) all_tags_df.head() # - # ### Tags description # This dataset comes with a tag description file including: # # * `Tag`: the tag name as it is recorded by the customer 
in their historian system (for instance the [Honeywell process history database](https://www.honeywellprocess.com/en-US/explore/products/advanced-applications/uniformance/Pages/uniformance-phd.aspx))
# * `UOM`: the unit of measure for the recorded signal
# * `Subsystem`: an ID linked to the part of the asset this sensor is attached to

tags_description_fname = os.path.join(RAW_DATA, 'tags_description.csv')
tags_description_df = pd.read_csv(tags_description_fname)
tags_description_df.head()

# Let's extract a list of features from this table: we group them by unit of measure for more convenience:

# Sorting by UOM keeps tags that share a unit adjacent in every downstream plot/grouping.
features = list(tags_description_df.sort_values(by='UOM')['Tag'])

# ## Dataset overview
# ---
# Build a list of dataframes, one per feature (this will be useful for visualization purposes). An early event in the year skews the data: we remove that part for visualization purposes only (hence the `start` and `end` range definition below), but will keep the period starting January 1st as a training period later on.
# + start = pd.to_datetime('2015-04-05 00:00:00') end = evaluation_end df_list = [] feature_groups = dict() for f in features: # Get the unit of measure for the current feature: uom = str(list(tags_description_df.loc[tags_description_df['Tag'] == f, 'UOM'])[0]) # We have already some features in this group, add it: if uom in feature_groups.keys(): feature_groups.update({uom: feature_groups[uom] + [f]}) # Otherwise, create this group: else: feature_groups.update({uom: [f]}) # Add the dataframe to the list: current_df = all_tags_df.loc[start:end, [f]] current_df = current_df.replace(np.nan, 0.0) df_list.append(current_df) # + tag = 'signal-028' tag_df = all_tags_df.loc[start:end, [tag]] tag_df.columns = ['Value'] fig, axes = lookout.plot_timeseries( tag_df, tag, fig_width=20, tag_split=evaluation_start, labels_df=labels_df ) # - fig = tsia.plot.plot_multivariate_timeseries( timeseries_list=df_list, tags_list=features, split_date=evaluation_start, tags_description_df=tags_description_df, tags_grouping_key='UOM', num_cols=4, col_size=5 ) # + # %%time # Discretize each signal in 3 bins: array = tsia.markov.discretize_multivariate(df_list) # Grouping the signals based on their unit of measure (UOM): num_timesteps = array.shape[1] separator = np.zeros(shape=(1, num_timesteps)) separator = np.where(separator==0, np.nan, separator) grouped_array = [] signal_list = [] current_row = 0 for uom in feature_groups.keys(): num_features = len(feature_groups[uom]) signal_list = signal_list + features[current_row:current_row + num_features + 1] signal_list.append(uom) grouped_array.append(array[current_row:current_row + num_features + 1]) grouped_array.append(separator) current_row += num_features grouped_array = np.concatenate(grouped_array) # Plot the strip chart: tsia.plot.plot_timeseries_strip_chart( grouped_array, signal_list=signal_list, fig_width=20, dates=df_list[0].index.to_pydatetime(), day_interval=2 ) # - # ## Building and uploading the dataset # --- # We will structure 
our S3 bucket like this: # ``` # s3://sagemaker-lookout-equipment-demo/ # | # +-- training-data/ # | | # | +-- expander/ # | | # | +-- subsystem-01 # | | \-- subsystem-01.csv # | | # | +-- subsystem-02 # | | \-- subsystem-02.csv # | | # | +-- ... # | | # | \-- subsystem-24 # | \-- subsystem-24.csv # | # +-- labelled-data/ # | # \-- labels.csv # ``` # Process each subsystem one by one: components = list(tags_description_df['Subsystem'].unique()) progress_bar = tqdm(components) for component in progress_bar: progress_bar.set_description(f'Component {component}') progress_bar.refresh() # Check if CSV file already exist and do not overwrite it: component_tags_fname = os.path.join(TRAIN_DATA, f'{component}', f'{component}.csv') if not os.path.exists(component_tags_fname): # Build the dataframe with all the signal timeseries for the current subsystem: component_tags_list = list(tags_description_df[tags_description_df['Subsystem'] == component]['Tag']) component_tags_df = all_tags_df[component_tags_list] component_tags_df = component_tags_df.reset_index() component_tags_df['Timestamp'] = component_tags_df['Timestamp'].dt.strftime('%Y-%m-%dT%H:%M:%S.%f') # Save to disk: os.makedirs(os.path.join(TRAIN_DATA, f'{component}'), exist_ok=True) component_tags_df.to_csv(component_tags_fname, index=None) # Uploading training dataset to S3: training_src_dir = TRAIN_DATA training_s3_dest_path = f's3://{BUCKET}/{PREFIX}/training-data/expander' # !aws s3 cp --recursive $training_src_dir $training_s3_dest_path # Uploading label dataset to S3: label_src_fname = os.path.join(LABEL_DATA, 'labels.csv') label_s3_dest_path = f's3://{BUCKET}/{PREFIX}/labelled-data/labels.csv' # !aws s3 cp $label_src_fname $label_s3_dest_path # ## Conclusion # --- # At this stage, we have built: # * A single Parquet dataset that contains all the historical data for all tags provided by the customer: this is **58,668,092** at a **1 minute** sampling rate for **122 tags**. 
# * **24 individual CSV files** (1 for each subsystem; each subsystem can contain several timeseries), filed in their respective subsystem directories
#
# Looking at the plot for **signal-028** above, we are going to try to predict the event that happens in **November 2015**: to achieve this, we will use a training set spanning from **January 2015** to **August 2015** and we will test from **September 2015** to **November 2015**.
getting_started/notebooks/1_data_preparation.ipynb
/ -*- coding: utf-8 -*-
/ ---
/ jupyter:
/   jupytext:
/     text_representation:
/       extension: .q
/       format_name: light
/       format_version: '1.5'
/     jupytext_version: 1.14.4
/   kernelspec:
/     display_name: SQL
/     language: sql
/     name: SQL
/ ---

/ + [markdown] azdata_cell_guid="cbdcb39b-fd7c-473a-8c94-cf68be2a6cd3"
/ This query will show us all user databases which are on the instance and whether they are encrypted.

/ + azdata_cell_guid="e6c9b9df-9efb-400c-9e26-a999d0a9ad83"
-- List every user database with its key settings; is_encrypted = 1 means TDE is enabled.
SELECT db.name,
       db.create_date,
       db.compatibility_level,
       db.collation_name,
       db.user_access_desc,
       db.state_desc,
       db.recovery_model_desc,
       db.page_verify_option_desc,
       db.is_encrypted
FROM sys.databases AS db
-- database_id 1-4 are the system databases (master, tempdb, model, msdb), so > 4 keeps user DBs only
WHERE db.database_id > 4
;

/ + [markdown] azdata_cell_guid="bacb0b62-647c-4983-bb70-d0f2c5ca4893"
/  Enable TDE on user database in Managed Instance.

/ + azdata_cell_guid="508c2401-f872-4cda-b02f-0115d1b88383"
-- Turning encryption on kicks off a background encryption scan of the database.
ALTER DATABASE AdventureWorks2019 SET ENCRYPTION ON;
GO

/ + [markdown] azdata_cell_guid="08901589-c603-412e-8970-8761b6922adc"
/  Check on the status of the encryption process for the Adventureworks2019 database.

/ + azdata_cell_guid="a4d148c7-e81e-4777-add8-2eb6775d0627"
-- sys.dm_database_encryption_keys reports the encryption scan progress
-- (percent_complete) and the current encryption state for the database.
DECLARE @DatabaseName SYSNAME = 'Adventureworks2019';

SELECT DB_NAME(dek.database_id) AS DbName,
       dek.encryption_scan_state_desc,
       dek.encryption_state_desc,
       dek.percent_complete,
       dek.encryptor_thumbprint,
       dek.encryptor_type
FROM sys.dm_database_encryption_keys AS dek
WHERE dek.database_id = DB_ID(@DatabaseName)
;
MigrateDbToManagedInstanceAddTde/BlogPostTSQL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Binary classifier for "description" excerpts: a bag-of-words / tf-idf
# feed-forward network separating README "description" sections from
# installation / invocation / citation sections plus Treebank background text.

import random
import pandas as pd
import nltk
from nltk.corpus import treebank
from sklearn.model_selection import train_test_split
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from torch import optim
import numpy as np
import re

nltk.download('punkt')
nltk.download('treebank')
nltk.download('stopwords')

# Positive class plus the three negative-class corpora.
description_df = pd.read_csv('../dataset/binary_classifiers/description.csv')
installation_df = pd.read_csv('../dataset/binary_classifiers/installation.csv')
invocation_df = pd.read_csv('../dataset/binary_classifiers/invocation.csv')
citation_df = pd.read_csv('../dataset/binary_classifiers/citation.csv')


def lower_stopwords(x):
    """Strip non-letter characters, lowercase, and remove stopwords from one excerpt."""
    # BUG FIX: re.sub's fourth positional argument is `count`, not `flags`;
    # the original call passed re.I|re.A (= 258) as a replacement limit.
    # Pass the flags by keyword.
    x = re.sub(r'[^a-zA-Z\s]', '', x, flags=re.I | re.A)
    x = x.lower()
    x = x.strip()
    text_tokens = [word for word in word_tokenize(x) if word not in stopwords.words()]
    return " ".join(text_tokens)


print(description_df["excerpt"][6])

# Sample 37.5% of the positive-set size from each of the four negative sources,
# so the negatives total roughly 1.5x the positives.
neg_quant = int(len(description_df) * .375)
treebank_background = pd.DataFrame(
    list(map(lambda sent: ' '.join(sent), random.sample(list(treebank.sents()), neg_quant))),
    columns=["excerpt"]).assign(description=False)
description_corpus = pd.concat([description_df.assign(description=True),
                                installation_df.sample(neg_quant).assign(description=False),
                                invocation_df.sample(neg_quant).assign(description=False),
                                citation_df.sample(neg_quant).assign(description=False),
                                treebank_background], sort=False)
# Keyword axis arguments (positional `axis` for drop/dropna was removed in pandas 2.0).
description_corpus.drop(columns='URL', inplace=True)
description_corpus.dropna(axis=0, inplace=True)
description_corpus.reset_index(drop=True, inplace=True)
description_corpus["excerpt"] = description_corpus["excerpt"].apply(lower_stopwords)
print(description_corpus.groupby(by="description").count())

X, y = description_corpus.excerpt, description_corpus.description
X_train, X_test, y_train, y_test = train_test_split(X, y)

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix


def curtail(array, threshold=0.0):
    """Zero out entries of `array` that fall below `threshold` (element-wise gate)."""
    return array * (array >= threshold)


# +
# Feature-extraction switches: binary vs count features, with optional tf-idf.
bina = False
tf_idf = False
threshold = 0.0
if bina:
    if tf_idf:
        vectorizer = TfidfVectorizer(use_idf=True, binary=True)
        X_vect_train = vectorizer.fit_transform(X_train).toarray()
        X_vect_train = curtail(X_vect_train, threshold)
        X_vect_test = vectorizer.transform(X_test).toarray()
    else:
        vectorizer = CountVectorizer(binary=True)
        X_vect_train = vectorizer.fit_transform(X_train).toarray()
        X_vect_test = vectorizer.transform(X_test).toarray()
else:
    if tf_idf:
        vectorizer = TfidfVectorizer(use_idf=True)
        X_vect_train = vectorizer.fit_transform(X_train).toarray()
        X_vect_train = curtail(X_vect_train, threshold)
        X_vect_test = vectorizer.transform(X_test).toarray()
    else:
        vectorizer = CountVectorizer()
        X_vect_train = vectorizer.fit_transform(X_train).toarray()
        X_vect_test = vectorizer.transform(X_test).toarray()

# One-hot targets (two columns: False, True).
y_hot_train = pd.get_dummies(y_train.values)
y_hot_test = pd.get_dummies(y_test.values)


class FFN(nn.Module):
    """Fully-connected ReLU network with a sigmoid output over 2 classes.

    `layer_arch` lists the hidden-layer widths; dropout is inserted after
    hidden layers 1, 3 and 5.
    """

    def __init__(self, layer_arch, input_size, output_size, bias=True):
        super(FFN, self).__init__()
        self.layer_arch = layer_arch
        self.input_size = input_size
        self.output_size = output_size
        self.bias = bias
        self.build_model()

    def build_model(self):
        """Assemble the nn.Sequential from layer_arch with named sub-modules."""
        model_arch = []
        unit = self.input_size
        for i, num in enumerate(self.layer_arch):
            model_arch.append(("dense_" + str(i), nn.Linear(unit, num, bias=self.bias)))
            model_arch.append(("nonlinear_" + str(i), nn.ReLU()))
            if i == 1 or i == 3 or i == 5:
                model_arch.append(("dropout_" + str(i), nn.Dropout()))
            unit = num
        model_arch.append(("dense_final", nn.Linear(unit, self.output_size, bias=self.bias)))
        model_arch.append(("act_final", nn.Sigmoid()))
        self.model = nn.Sequential(OrderedDict(model_arch))

    def forward(self, inputs):
        return self.model(inputs)
# -

print(X_vect_train[2])
print(X_vect_test.shape)
print(X_train[155])

model = FFN([256, 1024, 2048, 1024, 1024, 512, 256], X_vect_train.shape[1], 2)
epoch_num = 30
bs = 300
history = []
train_ds = TensorDataset(torch.tensor(X_vect_train).float(),
                         torch.tensor(y_hot_train.values).float())
train_dl = DataLoader(train_ds, batch_size=bs)
opt = optim.Adam(model.parameters())
# The loss module is stateless; create it once instead of once per batch.
loss_fun = nn.CrossEntropyLoss()
for i in range(epoch_num):
    for xb, yb in train_dl:
        target = torch.argmax(yb, dim=1, keepdim=False)
        pred = model(xb)
        loss = loss_fun(pred, target)
        loss.backward()
        opt.step()
        opt.zero_grad()
    #### test metrics #####
    test_pred = model(torch.tensor(X_vect_test).float())
    test_pred = torch.argmax(test_pred, dim=1, keepdim=False)
    ground_truth = torch.argmax(torch.tensor(y_hot_test.values).float(), dim=1, keepdim=False)
    # BUG FIX: sklearn's confusion_matrix signature is (y_true, y_pred); the
    # original call passed the prediction first, which transposes the matrix
    # and silently swaps fp/fn (i.e. swaps the reported precision and recall).
    tn, fp, fn, tp = confusion_matrix(ground_truth, test_pred).ravel()
    ### precision ###
    precision = (tp / (tp + fp)) * 100
    ### recall ###
    recall = (tp / (tp + fn)) * 100
    ### F-measure ###
    F_measure = (2 * precision * recall) / (precision + recall)
    ### accuracy ###
    accuracy = (torch.true_divide((torch.sum((test_pred - ground_truth) == 0)), test_pred.shape[0])) * 100
    print("test accuracy is {}".format(accuracy))
    print("test precision is {}".format(precision))
    print("test recall is {}".format(recall))
    print("test F-measure is {}".format(F_measure))
    print("************************")
    history.append({"accuracy": accuracy, "precision": precision, "recall": recall, "F_measure": F_measure})

import matplotlib.pyplot as plt

# +
# ROC curve on the positive-class probability (column 1 of the sigmoid output).
from sklearn import metrics
ground_truth = torch.argmax(torch.tensor(y_hot_test.values).float(), dim=1, keepdim=False)
test_pred_prob = model(torch.tensor(X_vect_test).float())
test_pred = torch.argmax(test_pred_prob, dim=1, keepdim=False)
fpr, tpr, thresholds = metrics.roc_curve(ground_truth.detach().numpy(),
                                         test_pred_prob[:, 1].detach().numpy())
fig, ax = plt.subplots()
ax.plot(fpr, tpr)
plt.show()
# -

# Collect the per-epoch metric curves and save them for the visualization notebooks.
n = len(history)
accu_array = np.array([history[i]["accuracy"] for i in range(n)])
prec_array = np.array([history[i]["precision"] for i in range(n)])
recall_array = np.array([history[i]["recall"] for i in range(n)])
F_array = np.array([history[i]["F_measure"] for i in range(n)])
np.save('../visualization/data/description_accu', accu_array)
np.save('../visualization/data/description_prec', prec_array)
np.save('../visualization/data/description_recall', recall_array)
np.save('../visualization/data/description_f', F_array)

# +
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 13))
interval = 4
ax1.plot([i + 1 for i in range(n)], accu_array)
ax1.set_xlabel("num_epochs")
ax1.set_ylabel("accuracy")
ax1.set_title("accuracy")
ax1.grid()
ax1.set_yticks([i for i in np.arange(min(accu_array[~np.isnan(accu_array)]), 101, interval)])
ax2.plot([i + 1 for i in range(n)], prec_array)
ax2.set_xlabel("num_epochs")
ax2.set_ylabel("precision")
ax2.set_title("precision")
ax2.grid()
ax2.set_yticks([i for i in np.arange(min(prec_array[~np.isnan(prec_array)]), 101, interval)])
ax3.plot([i + 1 for i in range(n)], recall_array)
ax3.set_xlabel("num_epochs")
ax3.set_ylabel("recall")
ax3.set_title("recall")
ax3.grid()
ax3.set_yticks([i for i in np.arange(min(recall_array[~np.isnan(recall_array)]), 101, interval)])
ax4.plot([i + 1 for i in range(n)], F_array)
ax4.set_xlabel("num_epochs")
ax4.set_ylabel("F_measure")
ax4.set_title("F_measure")
ax4.grid()
ax4.set_yticks([i for i in np.arange(min(F_array[~np.isnan(F_array)]), 101, interval)])
plt.tight_layout()
plt.show()
# -
binary_classifier/description_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

A = [1, 2, 3, 4]
B = [9, 4, 3, 4]
data = pd.DataFrame({'A': A, 'B': B})
data

# Create a new column C holding an indicator variable:
# C = 1 when A + B > 6, and C = 0 otherwise.
row_total = data['A'] + data['B']
data = data.assign(C=row_total.gt(6).astype(int))
data
pd_Building_indicator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/gabilodeau/INF8770/blob/master/Utilisation%20de%20la%20correlation%20croisee.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="1SWdnWZf4P-y" colab_type="text" # INF8770 Technologies multimédias # # Polytechnique Montréal # # Exemple du calcul de la corrélation croisée # + [markdown] id="FRzgNPn34eHk" colab_type="text" # Exécuter les 2 lignes suivantes pour colab. # + id="R22-3PbE4fEb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="f9807bc0-4120-4a07-a41c-4e1aed28c058" # !git clone https://github.com/gabilodeau/INF8770 # %cd INF8770 # + id="ibl3Qkld4P-1" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt from scipy.io import wavfile # + [markdown] id="1Db2gqch4P-_" colab_type="text" # Soit les trois vecteurs suivants: # + id="3MNOuirU4P_A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="e136f2b6-ab21-4f15-fd4e-ee268f93eddf" A = [1,3,3,6,3,4,7,5,4,3,2,6,3,2,4] B = [3,4,7,5] C = [10,10,11,10] plt.plot(range(len(A)), A) plt.plot(range(len(B)), B) plt.plot(range(len(C)), C) plt.show() # + [markdown] id="zs_mCkoa4P_K" colab_type="text" # On voit que B est plus ressemblant à A que C. Un vecteur est semblable ou fait partie d'un autre vecteur si la corrélation est suffisamment grande. Corrélation entre A et B: # + id="BYtS6Sq-4P_M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="ea3e481e-cc5b-4349-8675-87b37429bbf9" # On ajoute des 0 pour que les signaux soient de même taille. Ça facilite les caculs. 
B = np.pad(B, (0, len(A)-len(B)), 'constant') C = np.pad(C, (0, len(A)-len(C)), 'constant') Rxy=[] #Rxy Décalage de A vers la gauche, ou B vers la droite for i in range(0,len(A)): Somme = 0; for j in range(0, len(A)-i): Somme += A[i+j] * B[j] Rxy += [Somme] print(Rxy) Ryx=[] #Rxy Décalage de A vers la droite, ou B vers la gauche for i in range(0,len(A)): Somme = 0; for j in range(0, len(A)-i): Somme += B[i+j] * A[j] Ryx += [Somme] print(Ryx) CorrCroisee = Ryx[::-1] +Rxy[1:] print(CorrCroisee) # + [markdown] id="-0AT0SMK4P_S" colab_type="text" # Affichage du résultat de la correlation entre A et B # + id="tXKr7xss4P_U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="f69f6228-1adb-46c0-ec14-c90a5e147aa0" plt.plot(range(-len(A)+1,len(A)), CorrCroisee) Decalage = CorrCroisee.index(max(CorrCroisee))-len(A)+1 print('Le meilleur alignement est:', Decalage) plt.axvline(Decalage, color='k', ls='--') plt.show() # + [markdown] id="Kk-fIEVu4P_e" colab_type="text" # Vérifions maintenant la correlation croisée entre A et C avec la fonction correlate de numpy. # + id="ImwY_ea-4P_h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="7682f2e6-d67c-4ef2-fb0c-eb7210b934b3" CorrCroisee2 = np.correlate(A, C, "full") print(CorrCroisee2) # + [markdown] id="9ADmAImH4P_o" colab_type="text" # Affichage du résultat de la correlation. Notez que la meilleure correlation entre A et B donne 99, alors que le résultat est 205 entre A et C. Ce résulat est causé par les amplitudes différentes des signaux B et C. Un signal de plus grande amplitude aura nécessairement une meilleure corrélation. 
# + id="YgfE4keo4P_p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="15f2c8d9-e9c7-4711-8e94-e83b958e723e" plt.plot(range(-len(A)+1,len(A)), CorrCroisee2) Decalage2 = np.argmax(CorrCroisee2)-len(C)+1 print('Le meilleur alignement est:', Decalage2) plt.axvline(Decalage2, color='k', ls='--') plt.show() # + [markdown] id="wsS7-Tni4P_z" colab_type="text" # Voici le résultat de l'alignement des signaux pour maximiser leur corrélation. L'alignement entre A et B et parfait. Ce n'est pas le cas pour C. # + id="ccVrqduX4P_0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="a8bc545d-1c1b-4386-c761-256f2328d907" plt.plot(range(len(A)), A) B = B[:4] C = C[:4] plt.plot(range(Decalage, Decalage+len(B)), B) plt.plot(range(Decalage2, Decalage2+len(C)), C) plt.show() # + [markdown] id="FU0jnBIz4P_8" colab_type="text" # Maintenant, un exemple d'application pour rechercher un extrait audio. On normalize les signaux pour obtenir des résultats indépendant de l'amplitude. Lecture d'un fichier wav d'une personne disant zéro, un, deux, cinq. # + id="A9-IKWX34P_9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="47055b9e-a67b-4e22-ebf6-267bf5200f10" [fs1, signal1] = wavfile.read('0125.wav') moy = np.mean(signal1) ecart = np.std(signal1) signal1 = (signal1 - moy)/ecart #normalisation. plt.figure(figsize = (10,5)) plt.plot(range(len(signal1)), signal1) plt.show() # + [markdown] id="tLhtJbHh4QAB" colab_type="text" # Une personne disant zéro. 
# + id="7ku97Y164QAC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="492194c5-18df-491f-fa3a-60d816a95975" [fs2, signal2] = wavfile.read('0.wav') moy = np.mean(signal2) ecart = np.std(signal2) signal2 = (signal2 - moy)/ecart plt.figure(figsize = (10,5)) plt.plot(range(len(signal2)), signal2) plt.show() # + [markdown] id="zGwULBg-4QAJ" colab_type="text" # Est-ce que 0 est inclut dans 0125, et à quel endroit. Pour l'inclusion, il faudrait établir un seuil de correlation minimum. Dans ce cas-ci, on cherche seulement à localiser le 0. # + id="rpH3jTKe4QAL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="83449f9e-ce1b-4a19-8a4c-d49ab4000c4e" CorrCroisee3 = np.correlate(signal1, signal2, "full") Decalage3 = np.argmax(CorrCroisee3)-len(signal2)+1 Maxcor = np.max(CorrCroisee3) print('Le meilleur alignement est:', Decalage3, 'avec une correlation de: ', Maxcor) # + [markdown] id="2ILZZq1y4QAS" colab_type="text" # Affichage de l'alignement trouvé. # + id="5MPSIJxv4QAT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="f05a62bf-8e3f-49f1-e837-c1170f9990c0" plt.figure(figsize = (10,5)) plt.plot(range(len(signal1)), signal1) plt.plot(range(Decalage3, Decalage3+len(signal2)), signal2) plt.show() # + id="mTBIexCK4QAe" colab_type="code" colab={}
Utilisation de la correlation croisee.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic pytorch stuff

# !jupytext --to markdown "Basic pytorch.ipynb"

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

# ## Make a 3d tensor

# pay attention that torch.int = int32 and simple int = int64
y = torch.tensor([
    [[1, 2, 3], [4, 5, 6]],
    [[1, 2, 3], [4, 5, 6]],
    [[1, 2, 3], [4, 5, 6]]], dtype=torch.int)
print(y, "\n\n", y.shape)

# ## Making summation over different dimensions

y.sum(dim=0)
y.sum(dim=1)
y.sum(dim=2)

# ## Float tensors

# NOTE: torch.FloatTensor(2,3) allocates without initializing — values are garbage.
x = torch.FloatTensor(2,3)
print(x, x.dtype)
np_array = np.random.random((2,3)).astype(float)
np_array
x1 = torch.FloatTensor(np_array)
x2 = torch.randn(2,3)
print(x1, x1.dtype, "\n\n",x2, x2.dtype)

# ## Integer tensors

int_tensor = torch.arange(4, dtype=torch.int)
int_tensor, int_tensor.dtype
int_tensor.view(2,2)
torch.sum(y, dtype=torch.int)
e = torch.exp(int_tensor.float())
e
int_tensor*e

# ## Matmul and MM plus devices

torch.matmul(x1, x2.t())
torch.matmul(x1, x2.t())
np.matmul(x1.numpy(), x2.t().numpy())
torch.matmul(x1.to(torch.float64), x2.to(torch.float64).t())
# NOTE(review): assumes a CUDA device is available — x1.to(device) raises otherwise.
device = torch.device('cuda')
x1 = x1.to(device)
x1.get_device()
x = torch.randn(3,2)
x
# A CUDA tensor cannot be converted to numpy directly; demonstrate the error.
try:
    x.to(device).numpy()
except RuntimeError as e:
    print(e)
except Exception as e:
    print(e)
finally:
    print('OK')
# NOTE: torch.mm requires both operands to be 2-D, so this call raises;
# torch.matmul below handles the 2-D x 1-D case (matrix-vector product).
torch.mm(torch.ones(2,3), torch.ones(3))
torch.matmul(torch.ones(2,3), torch.ones(3))

# ## A bit of autograd

x = torch.arange(0,4).float().requires_grad_(True)
x
y = x**2
y.sum()
y.sum().backward()
x.grad
net = torch.nn.Linear(4,2)
net
f = torch.arange(0,4).float()
f
y = net(f)
y
for param in net.parameters():
    print (param)

# ## Simple little perceptrons

help(torch.nn.Module)

class MyNet(torch.nn.Module):
    """Two linear layers followed by a sigmoid."""

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.layer1 = torch.nn.Linear(input_size, hidden_size)
        self.layer2 = torch.nn.Linear(hidden_size,2)
        self.layer3 = torch.nn.Sigmoid()

    def forward(self, input_val):
        h = input_val
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        return h

    def print_params(self):
        """Print (name, parameter) pairs for inspection."""
        for item in self.named_parameters():
            print(item)

net = MyNet(4,16)
net.print_params()
net.forward(torch.rand(4))

class MyNet2(torch.nn.Sequential):
    """Same stack built by subclassing nn.Sequential directly."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__(nn.Linear(input_size, hidden_size), nn.Linear(hidden_size, output_size), nn.Softmax(dim=1))

    def print_params(self):
        for item in self.parameters():
            print(item)

def make_net2(input_size, hidden_size, output_size):
    """Build the equivalent stack with a plain nn.Sequential factory."""
    return nn.Sequential(nn.Linear(input_size, hidden_size), nn.Linear(hidden_size, output_size), nn.Sigmoid())

ttt = MyNet2(4,16, 10)
net2 = make_net2(4,16,10)
net2.forward(torch.ones(4))
net2
ttt.forward(torch.ones(1,4))

output = torch.full([10, 64], 0.999)  # A prediction (logit)
pos_weight = torch.ones([64])  # All weights are equal to 1
# BUG FIX: `target` was never defined, so the loss call raised a NameError.
# An all-ones target matches the trailing comment: the loss is
# -log(sigmoid(0.999)) for every element.
target = torch.ones([10, 64])
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
criterion(output, target)  # -log(sigmoid(0.999))

# # Experimenting with convolutions and autograd

# We make an input tensor that does not require grad

# this is an input tensor
input_tensor = torch.arange(1,10).to(torch.float).view((1,1,3,3))
input_tensor
print(f"The tenosr does not require a grad wrt it: {input_tensor.requires_grad}")

# We make a custom conv2d layer that initializes all weights to 1 and does not have a bias
class Conv2dCustom(nn.Conv2d):
    """Conv2d whose weights are all ones and which carries no bias term."""

    def __init__(self, in_channels=1, out_channels=1, kernel_size=(3,3)):
        super().__init__(in_channels, out_channels, kernel_size, bias=False)
        # BUG FIX: nn.Conv2d stores weight as (out_channels, in_channels, *kernel_size);
        # the original built the ones tensor with in/out swapped, which only
        # worked by accident because both default to 1.
        self.weight.data = torch.ones((out_channels, in_channels, *kernel_size))
        #self.bias.data = torch.zeros(1)

    def __repr__(self):
        super_repr = super().__repr__()
        return super_repr+'\nweights:\n'+str(self.weight.data)#+'\nbias:\n'+str(self.bias.data)

layer_conv2d = Conv2dCustom()
layer_conv2d
print(f'See if conv 2d requires grad: {layer_conv2d.weight.requires_grad}')

# With every kernel weight equal to 1 and no bias, the single 3x3 convolution
# over the 3x3 input reduces to the sum of the input entries.
assert layer_conv2d(input_tensor) == input_tensor.sum().item()

# no gradient so far (grad is still None before any backward pass)
type(layer_conv2d.weight.grad)
input_tensor.sum().item()

# This shows that Linear layers can be replaced with convolutions.
# Now compute the output of a single conv layer and run backward grad propagation.
input_tensor.requires_grad_()
out = layer_conv2d(input_tensor)
print(out)
out.backward()
input_tensor
print('Gradient w.r.t. input tensor (equals to weights of convolution)')
input_tensor.grad
layer_conv2d.weight
print('Gradient w.r.t convolution weight (equals to input tensor)')
layer_conv2d.weight.grad

# What we see here is that multiplication is a **gradient switcher**, i.e. the
# gradient **w.r.t. x equals w** and the gradient w.r.t. **w** equals **x**
# $$
# \large Out = \sum_i^n{w_i * x_i}
# $$
conv2d_filter = nn.Conv2d(1,1,(3,3))
for param, data in conv2d_filter.named_parameters():
    print(param, data, data.shape)

# ### 1D Convolutions

def init_weight(m):
    """Fill a Conv1d's weights with 0..k-1 (row-major) so outputs are easy to read.

    BUG FIX: use isinstance() instead of `type(m) == nn.Conv1d` (robust to
    subclasses), and only compute the element count when it is actually
    needed; `weight.numel()` replaces the reduce-over-shape lambda.
    """
    if isinstance(m, nn.Conv1d):
        numel = m.weight.data.numel()
        m.weight.data = torch.arange(numel).to(torch.float).reshape(m.weight.data.shape)

layer_conv1d = nn.Conv1d(in_channels=1, out_channels=3, kernel_size=3, bias=False)
layer_conv1d.apply(init_weight)
layer_conv1d.weight.data
layer_conv1d.weight.requires_grad

# A batch of 3 single-channel, length-4 signals, then one extra sample appended
# along the batch dimension.
conv_1d_input = torch.arange(12).to(torch.float).view(3,1,4)
conv_1d_input
extra = torch.FloatTensor([[[ 0., 1., 2., 3.]]])
conv_1d_input = torch.cat((conv_1d_input, extra), dim = 0)
conv_1d_input
conv_1d_input[0]
conv_1d_out = layer_conv1d(conv_1d_input)
conv_1d_out
conv_1d_out[0]
01myfiles/Basic pytorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="xpyEQiaL1dxc" # # <font color=blue>Assignments for "Data Exploration - Multivariate Analysis"</font> # + [markdown] id="g9Q7Sppn1dxl" # In this assignment, you will work on the `Students Performance` ([dataset](https://www.kaggle.com/spscientist/students-performance-in-exams/home)). You can reach the explanations of this data from Kaggle again. # # To complete this assignment, submit the Github link of the Jupyter notebook file containing solutions to the questions below. You can talk to your mentor on your head or ask Slack at office time. # + [markdown] id="9psqxuoy1dxp" # **(1)** Does the performance in the exams vary according to gender, origin and education level of parents? # + id="_J3mb_4S1dxs" colab={"base_uri": "https://localhost:8080/", "height": 205} executionInfo={"status": "ok", "timestamp": 1640513698020, "user_tz": -180, "elapsed": 278, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="afdf69a8-e780-485b-f186-6242cd52e0a2" import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd import seaborn as sns import scipy.stats as stats from scipy.stats.mstats import winsorize import warnings students = pd.read_csv('StudentsPerformance.csv') students.head() # + colab={"base_uri": "https://localhost:8080/"} id="JAQquRRq4LSC" executionInfo={"status": "ok", "timestamp": 1640513699181, "user_tz": -180, "elapsed": 7, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="cc53934c-10e3-4bea-8f67-a77044c39e48" 
students.info() # + [markdown] id="pltzrf6ssmg1" # # Genders # + id="rUKaWpuzrWX_" executionInfo={"status": "ok", "timestamp": 1640513700189, "user_tz": -180, "elapsed": 7, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} genders = students["gender"].unique() grup_students = students.groupby("gender") # + colab={"base_uri": "https://localhost:8080/", "height": 260} id="Ro4Ui1zlqziM" executionInfo={"status": "ok", "timestamp": 1640513701418, "user_tz": -180, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="92d6a3ba-4929-4879-ce40-d0ddab7ed465" pd.options.display.float_format = '{:.15f}'.format for var in ["math score", "reading score", "writing score"]: comparison = pd.DataFrame(columns=['group_1', 'group_2','statistic', 'p_value']) print("For the sake of comparison {}".format(var),end='') for i in range(0, len(genders)): for j in range(i+1, len(genders)): ttest = stats.ttest_ind(students[students["gender"]==genders[i]][var], students[students["gender"]==genders[j]][var]) group_1 = genders[i] group_2 = genders[j] statistic = ttest[0] p_value = ttest[1] comparison = comparison.append({"group_1" : group_1 , "group_2" : group_2 , "statistic": statistic , "p_value" : p_value}, ignore_index=True) display(comparison) # + [markdown] id="G7bBKeS4ubBy" # By t-test, one can see that the gender diversity **affects** the mean of the exam results. 
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="jIy0ZJcBJe8t" executionInfo={"status": "ok", "timestamp": 1640513704235, "user_tz": -180, "elapsed": 281, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="3b838810-4169-4793-805d-cae87a582794" students.groupby(['gender']).mean() # + [markdown] id="Pr9vLhIyss04" # # Origins # + id="OT3h91jRsjcu" executionInfo={"status": "ok", "timestamp": 1640513705094, "user_tz": -180, "elapsed": 4, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} origins = students["race/ethnicity"].unique() grup_students = students.groupby("race/ethnicity") # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="FbR2zzclsibO" executionInfo={"status": "ok", "timestamp": 1640513710498, "user_tz": -180, "elapsed": 11, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="7492b293-98c9-4df1-fe89-b507a259f9ca" pd.options.display.float_format = '{:.15f}'.format for var in ["math score", "reading score", "writing score"]: comparison = pd.DataFrame(columns=['group_1', 'group_2','statistic', 'p_value']) print("For the sake of comparison {}".format(var),end='') for i in range(0, len(origins)): for j in range(i+1, len(origins)): ttest = stats.ttest_ind(students[students["race/ethnicity"]==origins[i]][var], students[students["race/ethnicity"]==origins[j]][var]) group_1 = origins[i] group_2 = origins[j] statistic = ttest[0] p_value = ttest[1] comparison = comparison.append({"group_1" : group_1 , "group_2" : group_2 , "statistic": statistic , "p_value" : p_value}, ignore_index=True) display(comparison) # + [markdown] 
id="eZcJHoenvMwe" # # # * In the scores of math exams, (group B and group C) & (group C and group A) & (group B and group A) do **not** differ that much, whereas the others **do** change. # * In general, one can see the differences by checking whether the t-statistic value is greater than or equal to +-1.96 in the upper tables. # # # + colab={"base_uri": "https://localhost:8080/", "height": 237} id="vX29ynuFG7FW" executionInfo={"status": "ok", "timestamp": 1640513710914, "user_tz": -180, "elapsed": 9, "user": {"displayName": "Serhan \u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="d2756297-2d77-4aee-8f4d-868281c60ac3" #this is to see the real mean values of different ethnicities. students.groupby(['race/ethnicity']).mean() # + [markdown] id="KzXQPS5GvVdy" # # Parental lvl of Ed. # + id="_wQzclhGvZud" executionInfo={"status": "ok", "timestamp": 1640513712143, "user_tz": -180, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} parents = students["parental level of education"].unique() grup_students = students.groupby("parental level of education") # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="B-A02xZCvo42" executionInfo={"status": "ok", "timestamp": 1640513712987, "user_tz": -180, "elapsed": 461, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="a38892f4-99cb-4261-88e9-73a5fc8f4b0b" pd.options.display.float_format = '{:.15f}'.format for var in ["math score", "reading score", "writing score"]: comparison = pd.DataFrame(columns=['group_1', 'group_2','statistic', 'p_value']) print("For the sake of comparison {}".format(var),end='') for i in range(0, len(parents)): for j in 
range(i+1, len(parents)): ttest = stats.ttest_ind(students[students["parental level of education"]==parents[i]][var], students[students["parental level of education"]==parents[j]][var]) group_1 = parents[i] group_2 = parents[j] statistic = ttest[0] p_value = ttest[1] comparison = comparison.append({"group_1" : group_1 , "group_2" : group_2 , "statistic": statistic , "p_value" : p_value}, ignore_index=True) display(comparison) # + [markdown] id="kVa3JOYzw4R3" # Again, it varies for diverse categories. # # * For example, there is a greater correlation of the results of bachelor's degree and master's degree. # # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="4Ao90MTnKFRc" executionInfo={"status": "ok", "timestamp": 1640513715832, "user_tz": -180, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="17823211-a5a2-44c9-a897-75e18315718d" students.groupby(['parental level of education']).mean() # + [markdown] id="nCAXa4y-wqY2" # # Charts # + colab={"base_uri": "https://localhost:8080/", "height": 350} id="l3zgivQB1_d-" executionInfo={"status": "ok", "timestamp": 1640513717814, "user_tz": -180, "elapsed": 440, "user": {"displayName": "Serhan \u00d6<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="c41409ea-eba2-4240-efd7-f8cdc63b9bf4" plt.figure(figsize=(12,5)) plt.subplot(121) plt.scatter(students['gender'], students['math score']) plt.title('Gender & Math Score') plt.xlabel('Gender') plt.ylabel('Exam Scores') plt.subplot(122) plt.scatter(students['race/ethnicity'], students['math score']) plt.title('Origin & Math Score') plt.xlabel('Origin') plt.ylabel('Exam Scores') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 530} id="c44MiiwE3UEI" executionInfo={"status": "ok", "timestamp": 
1640513720035, "user_tz": -180, "elapsed": 392, "user": {"displayName": "Serhan \u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="39871d7c-a2ac-4230-95c4-609a2411b76c" plt.figure(figsize=(10,8)) plt.scatter(students['parental level of education'], students['math score']) plt.title('Parental Level of Education & Math Score') plt.xlabel('Parental Level of Education') plt.ylabel('Exam Scores') # + [markdown] id="5mGnXjEU1dxt" # **(2)** Does lunch type have a relationship with exam performances? How can you explain this, if any? # + id="kF57AKtCqw85" executionInfo={"status": "ok", "timestamp": 1640513721775, "user_tz": -180, "elapsed": 389, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} lunchs = students["lunch"].unique() grup_students = students.groupby("lunch") # + colab={"base_uri": "https://localhost:8080/", "height": 260} id="u4Wtrck6xV9l" executionInfo={"status": "ok", "timestamp": 1640513725999, "user_tz": -180, "elapsed": 666, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="f31ff3b7-1f55-403a-c5e3-7a380e29f922" pd.options.display.float_format = '{:.15f}'.format for var in ["math score", "reading score", "writing score"]: comparison = pd.DataFrame(columns=['group_1', 'group_2','statistic', 'p_value']) print("For the sake of comparison {}".format(var),end='') for i in range(0, len(lunchs)): for j in range(i+1, len(lunchs)): ttest = stats.ttest_ind(students[students["lunch"]==lunchs[i]][var], students[students["lunch"]==lunchs[j]][var]) group_1 = lunchs[i] group_2 = lunchs[j] statistic = ttest[0] p_value = ttest[1] comparison = comparison.append({"group_1" : group_1 , "group_2" : group_2 , "statistic": 
statistic , "p_value" : p_value}, ignore_index=True) display(comparison) # + [markdown] id="mp43yMTzxoKO" # Our t-statistics values are so high, that means we have to reject the null hypothesis. Thus, lunch type affects the exam results, unfortunately. # + colab={"base_uri": "https://localhost:8080/", "height": 143} id="1HLsOomVKwW6" executionInfo={"status": "ok", "timestamp": 1640513734735, "user_tz": -180, "elapsed": 276, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="03bc5f6a-e025-4385-8cc5-2ba345497914" students.groupby(['lunch']).mean() # + id="KTg2udpa1dxu" colab={"base_uri": "https://localhost:8080/", "height": 512} executionInfo={"status": "ok", "timestamp": 1640513737369, "user_tz": -180, "elapsed": 1394, "user": {"displayName": "Serhan \u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="706327ed-e8bb-4947-d91b-a4d0711f769b" plt.figure(figsize=(15,8)) plt.subplot(131) plt.scatter(students['lunch'], students['math score']) plt.title('Lunch Type & Math Scores') plt.xlabel('Lunch Type') plt.ylabel('Exam Scores') plt.subplot(132) plt.scatter(students['lunch'], students['reading score']) plt.title('Lunch Type & Reading Scores') plt.xlabel('Lunch Type') plt.ylabel('Exam Scores') plt.subplot(133) plt.scatter(students['lunch'], students['writing score']) plt.title('Lunch Type & Writing Scores') plt.xlabel('Lunch Type') plt.ylabel('Exam Scores') plt.show() # + [markdown] id="MNQWrGUB1dxu" # **(3)** Does the preparation courses have an impact on exam performance? 
# + id="oYgSDeZC09zB" executionInfo={"status": "ok", "timestamp": 1640513738020, "user_tz": -180, "elapsed": 333, "user": {"displayName": "Serhan \u00d6<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} courses = students["test preparation course"].unique() grup_students = students.groupby("test preparation course") # + colab={"base_uri": "https://localhost:8080/", "height": 260} id="GMqoVTMN1SHv" executionInfo={"status": "ok", "timestamp": 1640513738600, "user_tz": -180, "elapsed": 10, "user": {"displayName": "Serhan \u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="3173fd10-4c6b-418a-b827-6eccd9a0aca5" pd.options.display.float_format = '{:.15f}'.format for var in ["math score", "reading score", "writing score"]: comparison = pd.DataFrame(columns=['group_1', 'group_2','statistic', 'p_value']) print("For the sake of comparison {}".format(var),end='') for i in range(0, len(courses)): for j in range(i+1, len(courses)): ttest = stats.ttest_ind(students[students["test preparation course"]==courses[i]][var], students[students["test preparation course"]==courses[j]][var]) group_1 = courses[i] group_2 = courses[j] statistic = ttest[0] p_value = ttest[1] comparison = comparison.append({"group_1" : group_1 , "group_2" : group_2 , "statistic": statistic , "p_value" : p_value}, ignore_index=True) display(comparison) # + [markdown] id="VZIyk2wsLPlb" # Again, the test prep courses affect the exam results more than we expected. 
# + id="n6QqEPAl1dxz" colab={"base_uri": "https://localhost:8080/", "height": 512} executionInfo={"status": "ok", "timestamp": 1640513741019, "user_tz": -180, "elapsed": 889, "user": {"displayName": "Serhan \u00d6<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="ded3ca8f-511f-46fa-99fb-a74143aec8ed" plt.figure(figsize=(15,8)) plt.subplot(131) plt.scatter(students['test preparation course'], students['math score']) plt.title('Prep Course Types & Math Scores') plt.xlabel('Prep Course Type') plt.ylabel('Exam Scores') plt.subplot(132) plt.scatter(students['test preparation course'], students['reading score']) plt.title('Prep Course Types & Reading Scores') plt.xlabel('Prep Course Type') plt.ylabel('Exam Scores') plt.subplot(133) plt.scatter(students['test preparation course'], students['writing score']) plt.title('Prep Course Types & Writing Scores') plt.xlabel('Prep Course Type') plt.ylabel('Exam Scores') plt.show() # + [markdown] id="YLpe0STQ5qFH" # #Yes, it affects the exam results. # + [markdown] id="wE7JE2Od1dxy" # **(4)** Which lessons are most correlated with each other? 
# + id="pT7oXuix1dxv" colab={"base_uri": "https://localhost:8080/", "height": 512} executionInfo={"status": "ok", "timestamp": 1640513743170, "user_tz": -180, "elapsed": 890, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="7d189cdc-57f2-4fbc-e0fb-ade4959b0d1b" plt.figure(figsize=(15,8)) plt.subplot(131) plt.scatter(students['reading score'], students['math score']) plt.title('Reading Scores & Math Scores') plt.xlabel('Reading Scores') plt.ylabel('Math Scores') plt.subplot(132) plt.scatter(students['writing score'], students['reading score']) plt.title('Writing Scores & Reading Scores') plt.xlabel('Writing Scores') plt.ylabel('Reading Scores') plt.subplot(133) plt.scatter(students['writing score'], students['math score']) plt.title('Writing Scores & Math Scores') plt.xlabel('Writing Scores') plt.ylabel('Math Scores') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 143} id="rQNQq8NN61W-" executionInfo={"status": "ok", "timestamp": 1640513744565, "user_tz": -180, "elapsed": 10, "user": {"displayName": "Serhan \u00d6<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="6e675d1f-14ef-4e88-b6fc-524261f40fd1" students.corr() # + [markdown] id="l_MSrYrd6OLP" # # Writing & Reading Scores are much more correlated than the other options.
EDA_Assignments/A_06_DataExplorationMultivariateAnalysis_en_SerhanOnerAksakal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:virt_SparsDT] # language: python # name: conda-env-virt_SparsDT-py # --- # ## Debug the SparsDT # import lib # %load_ext autoreload # %autoreload 2 import sys sys.path.append('../') import torch import numpy as np import lib.models as model import lib.util as util # ### Testing estimating alpha Y = torch.rand([10,1600],dtype=torch.float32) SparsDT = model.ModelSparseDT(10,Y, num_samples=1e5) # ### Testing the optimizer args = { 'lr' : 0.1 } SparsDT.set_optimizer('SGD', args) # ### Testing estim gamma U = torch.randn([SparsDT.m_rows, 1000]) a = SparsDT._estimate_gamma(U) a.shape # ### Testing find U, norm alpha, Loss U = SparsDT._findU(1000) U.shape norm = SparsDT._norm_alpha(SparsDT.A, U) norm.shape gamma = SparsDT._estimate_gamma(U) l = SparsDT.Loss(U,gamma) l # ### Testing fit A = SparsDT.fit(max_iter=1e3, rand_U=True, num_col = 100) # ### Testing SparsDT on a SalphaS signal m = 16 n = 24 k = 500 alpha = 1.2 beta = 0 A = util.Generate_A(m,n) X = util.Generate_alpha_random(alpha, beta, shape=(n, k)) Y = A.mm(X) SparsDT = model.ModelSparseDT(n,Y, num_samples=1e6, A=A) args = { 'lr' : 0.1 } SparsDT.set_optimizer('Adam', args) Ahat = SparsDT.fit(max_iter=1000, rand_U=True) util.calc_correct(Ahat, A).mean()
notebooks/Debug - SparsDT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %%writefile baselines.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 11:57:27 2015

@author: <NAME>

Baseline session-based recommenders: RandomPred, Pop, SessionPop, ItemKNN
and BPR. All share the same duck-typed interface: fit(data) followed by
predict_next(session_id, input_item_id, predict_for_item_ids) returning a
pandas.Series of scores indexed by item ID.
"""

import numpy as np
import pandas as pd


class RandomPred:
    '''
    RandomPred()

    Baseline predictor that scores every candidate item with a uniform
    random number — a sanity-check lower bound for evaluation.
    '''

    def fit(self, data):
        '''
        Dummy training step: a random predictor has nothing to learn.

        Parameters
        --------
        data : pandas.DataFrame
            Training events (session ID, item ID, timestamp columns). Ignored.
        '''
        pass

    def predict_next(self, session_id, input_item_id, predict_for_item_ids):
        '''
        Return an independent uniform random score per requested item.

        Parameters
        --------
        session_id : int or string
            Session ID of the event (ignored).
        input_item_id : int or string
            Item ID of the event (ignored).
        predict_for_item_ids : 1D array
            IDs of items to score.

        Returns
        --------
        out : pandas.Series
            Random scores indexed by the item IDs.
        '''
        return pd.Series(data=np.random.rand(len(predict_for_item_ids)), index=predict_for_item_ids)


class Pop:
    '''
    Pop(top_n=100, item_key='ItemId', support_by_key=None)

    Popularity predictor: items with larger support score higher,
    r_i = supp_i / (1 + supp_i). Only the top_n items get non-zero scores.

    Parameters
    --------
    top_n : int
        Only give back non-zero scores to the top N ranking items.
        Should be >= the cut-off of your evaluation. (Default: 100)
    item_key : string
        Header of the item ID column in the training data. (Default: 'ItemId')
    support_by_key : string or None
        If not None, support is the number of unique values of this column
        per item; if None, support is the event count. (Default: None)
    '''

    def __init__(self, top_n=100, item_key='ItemId', support_by_key=None):
        self.top_n = top_n
        self.item_key = item_key
        self.support_by_key = support_by_key

    def fit(self, data):
        '''
        Compute per-item popularity scores and keep the top_n.

        Parameters
        --------
        data : pandas.DataFrame
            Training events with at least the item-ID column (item_key).
        '''
        grp = data.groupby(self.item_key)
        # Support = event count, or unique count of support_by_key values.
        self.pop_list = grp.size() if self.support_by_key is None else grp[self.support_by_key].nunique()
        # Saturating transform keeps scores in (0, 1) and monotone in support.
        self.pop_list = self.pop_list / (self.pop_list + 1)
        self.pop_list.sort_values(ascending=False, inplace=True)
        self.pop_list = self.pop_list.head(self.top_n)

    def predict_next(self, session_id, input_item_id, predict_for_item_ids):
        '''
        Score the requested items by their (precomputed) popularity.

        Items outside the stored top_n list get a score of 0.

        Parameters
        --------
        session_id : int or string
            Session ID of the event (ignored).
        input_item_id : int or string
            Item ID of the event (ignored).
        predict_for_item_ids : 1D array
            IDs of items to score.

        Returns
        --------
        out : pandas.Series
            Popularity scores indexed by the item IDs.
        '''
        preds = np.zeros(len(predict_for_item_ids))
        # np.isin replaces np.in1d, which was removed in NumPy 2.0.
        mask = np.isin(predict_for_item_ids, self.pop_list.index)
        preds[mask] = self.pop_list[predict_for_item_ids[mask]]
        return pd.Series(data=preds, index=predict_for_item_ids)


class SessionPop:
    '''
    SessionPop(top_n=100, item_key='ItemId', support_by_key=None)

    Session popularity predictor: score = in-session occurrence count plus
    the global popularity score as tie-breaker,
    r_{s,i} = supp_{s,i} + supp_i / (1 + supp_i).

    Parameters
    --------
    top_n : int
        Only give back non-zero global-popularity scores to the top N items.
        Should be >= the cut-off of your evaluation. (Default: 100)
    item_key : string
        Header of the item ID column in the training data. (Default: 'ItemId')
    support_by_key : string or None
        If not None, support is the number of unique values of this column
        per item; if None, support is the event count. (Default: None)
    '''

    def __init__(self, top_n=100, item_key='ItemId', support_by_key=None):
        self.top_n = top_n
        self.item_key = item_key
        self.support_by_key = support_by_key

    def fit(self, data):
        '''
        Compute the global popularity component and reset session state.

        Parameters
        --------
        data : pandas.DataFrame
            Training events with at least the item-ID column (item_key).
        '''
        grp = data.groupby(self.item_key)
        self.pop_list = grp.size() if self.support_by_key is None else grp[self.support_by_key].nunique()
        self.pop_list = self.pop_list / (self.pop_list + 1)
        self.pop_list.sort_values(ascending=False, inplace=True)
        self.pop_list = self.pop_list.head(self.top_n)
        # Sentinel: no session has been observed yet.
        self.prev_session_id = -1

    def predict_next(self, session_id, input_item_id, predict_for_item_ids):
        '''
        Score items by in-session frequency plus global popularity.

        A change of session_id between calls starts a new session and resets
        the per-session item counts.

        Parameters
        --------
        session_id : int or string
            Session ID of the event; a new value starts a new session.
        input_item_id : int or string
            Item ID of the current event; its in-session count is incremented.
        predict_for_item_ids : 1D array
            IDs of items to score.

        Returns
        --------
        out : pandas.Series
            Scores indexed by the item IDs.
        '''
        if self.prev_session_id != session_id:
            self.prev_session_id = session_id
            self.pers = dict()
        # Count occurrences of the current item within the session.
        self.pers[input_item_id] = self.pers.get(input_item_id, 0) + 1
        preds = np.zeros(len(predict_for_item_ids))
        # Global popularity component (np.isin: np.in1d removed in NumPy 2.0).
        mask = np.isin(predict_for_item_ids, self.pop_list.index)
        ser = pd.Series(self.pers)
        preds[mask] = self.pop_list[predict_for_item_ids[mask]]
        # Add the in-session occurrence counts on top.
        mask = np.isin(predict_for_item_ids, ser.index)
        preds[mask] += ser[predict_for_item_ids[mask]]
        return pd.Series(data=preds, index=predict_for_item_ids)


class ItemKNN:
    '''
    ItemKNN(n_sims=100, lmbd=20, alpha=0.5, session_key='SessionId', item_key='ItemId', time_key='Time')

    Item-to-item predictor based on session co-occurrence:
    s_{i,j} = sum_s I{(s,i) in D & (s,j) in D} / (supp_i+lmbd)^alpha (supp_j+lmbd)^(1-alpha)

    Parameters
    --------
    n_sims : int
        Only the N most similar items per item get non-zero scores.
        Should be >= the cut-off of your evaluation. (Default: 100)
    lmbd : float
        Regularization; discounts similarity of rare items. (Default: 20)
    alpha : float
        Balance of the two support normalizations: 0.5 gives cosine
        similarity, 1.0 gives confidence (as in association rules).
    session_key : string
        Header of the session ID column. (Default: 'SessionId')
    item_key : string
        Header of the item ID column. (Default: 'ItemId')
    time_key : string
        Header of the timestamp column. (Default: 'Time')
    '''

    def __init__(self, n_sims=100, lmbd=20, alpha=0.5, session_key='SessionId', item_key='ItemId', time_key='Time'):
        self.n_sims = n_sims
        self.lmbd = lmbd
        self.alpha = alpha
        self.item_key = item_key
        self.session_key = session_key
        self.time_key = time_key

    def fit(self, data):
        '''
        Precompute the n_sims most similar items for every item.

        NOTE: mutates the passed DataFrame's index (set_index inplace).

        Parameters
        --------
        data : pandas.DataFrame
            Training events with session-ID, item-ID and timestamp columns
            (session_key, item_key, time_key).
        '''
        data.set_index(np.arange(len(data)), inplace=True)
        itemids = data[self.item_key].unique()
        n_items = len(itemids)
        # Attach dense integer indices for items and sessions.
        data = pd.merge(data, pd.DataFrame({self.item_key: itemids, 'ItemIdx': np.arange(len(itemids))}), on=self.item_key, how='inner')
        sessionids = data[self.session_key].unique()
        data = pd.merge(data, pd.DataFrame({self.session_key: sessionids, 'SessionIdx': np.arange(len(sessionids))}), on=self.session_key, how='inner')
        # CSR-style offsets: events grouped by session, sorted by time.
        supp = data.groupby('SessionIdx').size()
        session_offsets = np.zeros(len(supp) + 1, dtype=np.int32)
        session_offsets[1:] = supp.cumsum()
        index_by_sessions = data.sort_values(['SessionIdx', self.time_key]).index.values
        # CSR-style offsets: events grouped by item, sorted by time.
        supp = data.groupby('ItemIdx').size()
        item_offsets = np.zeros(n_items + 1, dtype=np.int32)
        item_offsets[1:] = supp.cumsum()
        index_by_items = data.sort_values(['ItemIdx', self.time_key]).index.values
        self.sims = dict()
        for i in range(n_items):
            if i % 1000 == 0:
                print('Train itemKNN process: ', i, ' of ', n_items, ' items: ', (i / n_items * 100.0))
            # Co-occurrence counts of item i with every other item.
            iarray = np.zeros(n_items)
            start = item_offsets[i]
            end = item_offsets[i + 1]
            for e in index_by_items[start:end]:
                uidx = data.SessionIdx.values[e]
                ustart = session_offsets[uidx]
                uend = session_offsets[uidx + 1]
                user_events = index_by_sessions[ustart:uend]
                iarray[data.ItemIdx.values[user_events]] += 1
            iarray[i] = 0  # no self-similarity
            # Regularized asymmetric support normalization.
            norm = np.power((supp[i] + self.lmbd), self.alpha) * np.power((supp.values + self.lmbd), (1.0 - self.alpha))
            norm[norm == 0] = 1
            iarray = iarray / norm
            # Keep the n_sims highest similarities, descending.
            indices = np.argsort(iarray)[-1:-1 - self.n_sims:-1]
            self.sims[itemids[i]] = pd.Series(data=iarray[indices], index=itemids[indices])

    def predict_next(self, session_id, input_item_id, predict_for_item_ids):
        '''
        Score the requested items by their similarity to the current item.

        Parameters
        --------
        session_id : int or string
            Session ID of the event (ignored).
        input_item_id : int or string
            Item ID of the event; must be in the training item set.
        predict_for_item_ids : 1D array
            IDs of items to score; must all be in the training item set.

        Returns
        --------
        out : pandas.Series
            Similarity scores indexed by the item IDs (0 outside top n_sims).
        '''
        preds = np.zeros(len(predict_for_item_ids))
        sim_list = self.sims[input_item_id]
        # np.isin replaces np.in1d, which was removed in NumPy 2.0.
        mask = np.isin(predict_for_item_ids, sim_list.index)
        preds[mask] = sim_list[predict_for_item_ids[mask]]
        return pd.Series(data=preds, index=predict_for_item_ids)


class BPR:
    '''
    BPR(n_factors=100, n_iterations=10, learning_rate=0.01, lambda_session=0.0, lambda_item=0.0, sigma=0.05, init_normal=False, session_key='SessionId', item_key='ItemId')

    Bayesian Personalized Ranking Matrix Factorization (BPR-MF). At
    prediction time, the session state is the average of the feature
    vectors of the items seen in it so far.

    Parameters
    --------
    n_factors : int
        Number of latent features. (Default: 100)
    n_iterations : int
        Number of training epochs. (Default: 10)
    learning_rate : float
        SGD learning rate. (Default: 0.01)
    lambda_session : float
        Regularization for session features. (Default: 0.0)
    lambda_item : float
        Regularization for item features. (Default: 0.0)
    sigma : float
        Width of the random initialization. (Default: 0.05)
    init_normal : boolean
        Normal (True) vs uniform (False) initialization.
    session_key : string
        Header of the session ID column. (Default: 'SessionId')
    item_key : string
        Header of the item ID column. (Default: 'ItemId')
    '''

    def __init__(self, n_factors=100, n_iterations=10, learning_rate=0.01, lambda_session=0.0, lambda_item=0.0, sigma=0.05, init_normal=False, session_key='SessionId', item_key='ItemId'):
        self.n_factors = n_factors
        self.n_iterations = n_iterations
        self.learning_rate = learning_rate
        self.lambda_session = lambda_session
        self.lambda_item = lambda_item
        self.sigma = sigma
        self.init_normal = init_normal
        self.session_key = session_key
        self.item_key = item_key
        self.current_session = None

    def init(self, data):
        # Random init of session (U) and item (I) factor matrices.
        self.U = np.random.rand(self.n_sessions, self.n_factors) * 2 * self.sigma - self.sigma if not self.init_normal else np.random.randn(self.n_sessions, self.n_factors) * self.sigma
        self.I = np.random.rand(self.n_items, self.n_factors) * 2 * self.sigma - self.sigma if not self.init_normal else np.random.randn(self.n_items, self.n_factors) * self.sigma
        # Bias terms; bU is initialized for symmetry but never used in scoring.
        self.bU = np.zeros(self.n_sessions)
        self.bI = np.zeros(self.n_items)

    def update(self, uidx, p, n):
        # One SGD step on the BPR pairwise loss for (session uidx,
        # positive item p, sampled negative item n); returns log-likelihood.
        uF = np.copy(self.U[uidx, :])
        iF1 = np.copy(self.I[p, :])
        iF2 = np.copy(self.I[n, :])
        sigm = self.sigmoid(iF1.T.dot(uF) - iF2.T.dot(uF) + self.bI[p] - self.bI[n])
        c = 1.0 - sigm
        self.U[uidx, :] += self.learning_rate * (c * (iF1 - iF2) - self.lambda_session * uF)
        self.I[p, :] += self.learning_rate * (c * uF - self.lambda_item * iF1)
        self.I[n, :] += self.learning_rate * (-c * uF - self.lambda_item * iF2)
        return np.log(sigm)

    def fit(self, data):
        '''
        Train the BPR-MF factors with SGD over random event permutations.

        Parameters
        --------
        data : pandas.DataFrame
            Training events with session-ID and item-ID columns
            (session_key, item_key).
        '''
        itemids = data[self.item_key].unique()
        self.n_items = len(itemids)
        self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)
        sessionids = data[self.session_key].unique()
        self.n_sessions = len(sessionids)
        data = pd.merge(data, pd.DataFrame({self.item_key: itemids, 'ItemIdx': np.arange(self.n_items)}), on=self.item_key, how='inner')
        data = pd.merge(data, pd.DataFrame({self.session_key: sessionids, 'SessionIdx': np.arange(self.n_sessions)}), on=self.session_key, how='inner')
        self.init(data)
        for it in range(self.n_iterations):
            c = []
            for e in np.random.permutation(len(data)):
                uidx = data.SessionIdx.values[e]
                iidx = data.ItemIdx.values[e]
                # Uniformly sampled negative item.
                iidx2 = data.ItemIdx.values[np.random.randint(self.n_items)]
                err = self.update(uidx, iidx, iidx2)
                c.append(err)
            print(it, np.mean(c))

    def predict_next(self, session_id, input_item_id, predict_for_item_ids):
        '''
        Score items against the running mean of the session's item factors.

        Parameters
        --------
        session_id : int or string
            Session ID of the event; a new value starts a new session.
        input_item_id : int or string
            Item ID of the event; must be in the training item set.
        predict_for_item_ids : 1D array
            IDs of items to score; must all be in the training item set.

        Returns
        --------
        out : pandas.Series
            Dot-product scores (plus item bias) indexed by the item IDs.
        '''
        iidx = self.itemidmap[input_item_id]
        if self.current_session is None or self.current_session != session_id:
            self.current_session = session_id
            self.session = [iidx]
        else:
            self.session.append(iidx)
        # Session representation: mean of the factors of items seen so far.
        uF = self.I[self.session].mean(axis=0)
        iIdxs = self.itemidmap[predict_for_item_ids]
        return pd.Series(data=self.I[iIdxs].dot(uF) + self.bI[iIdxs], index=predict_for_item_ids)

    def sigmoid(self, x):
        # Logistic function used by the BPR loss.
        return 1.0 / (1.0 + np.exp(-x))
# -
ipython/3_Training_Predicting/Algorithms/baselines.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # **Computational Drug Discovery (CDD) [Part 1] Download Bioactivity Data**
#
# Downloads curated bioactivity data (IC50, nM) for the SARS coronavirus
# 3C-like proteinase from the ChEMBL database, labels each compound as
# active / intermediate / inactive, and saves the results to CSV.
# Originally a Colab notebook: the "# !" lines are jupytext-escaped shell
# commands and the bare expressions are notebook display cells.

# Install the ChEMBL web service package so that we can retrieve bioactivity
# data from the ChEMBL Database.
# ! pip install chembl_webresource_client

# Import necessary libraries
import pandas as pd
from chembl_webresource_client.new_client import new_client

# ## Search for the target protein -----------------------------------------
# Target search for coronavirus (queries ChEMBL for matching targets).
target = new_client.target
target_query = target.search('coronavirus')
targets = pd.DataFrame.from_dict(target_query)
targets

# The fifth entry corresponds to the target protein
# *SARS coronavirus 3C-like proteinase* (CHEMBL3927).
selected_target = targets.target_chembl_id[4]
selected_target

# Retrieve only bioactivity records for the selected target that are
# reported as IC50 values in nM (nanomolar) units.
activity = new_client.activity
res = activity.filter(target_chembl_id=selected_target).filter(standard_type="IC50")

df = pd.DataFrame.from_dict(res)

df.head(3)

df.standard_type.unique()

# Save the raw bioactivity data to CSV.
df.to_csv('bioactivity_data.csv', index=False)

# ## Copy files to Google Drive (Colab only) --------------------------------
from google.colab import drive
drive.mount('/content/gdrive/', force_remount=True)

# ! ls "/content/gdrive/My Drive/Colab Notebooks/"
# ! mkdir "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics"
# ! cp bioactivity_data.csv "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics"
# ! ls -l "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics"

# ! ls
# ! head bioactivity_data.csv

# ## Handling missing data ---------------------------------------------------
# Drop any compound with a missing value in the standard_value column.
df2 = df[df.standard_value.notna()]
df2

# ## Data pre-processing of the bioactivity data -----------------------------
# Label compounds: IC50 < 1,000 nM -> active; > 10,000 nM -> inactive;
# in between -> intermediate.
bioactivity_class = []
for i in df2.standard_value:
    if float(i) >= 10000:
        bioactivity_class.append("inactive")
    elif float(i) <= 1000:
        bioactivity_class.append("active")
    else:
        bioactivity_class.append("intermediate")

# Collect molecule_chembl_id into a list.
mol_cid = []
for i in df2.molecule_chembl_id:
    mol_cid.append(i)

# Collect canonical_smiles into a list.
canonical_smiles = []
for i in df2.canonical_smiles:
    canonical_smiles.append(i)

# Collect standard_value into a list.
standard_value = []
for i in df2.standard_value:
    standard_value.append(i)

# Combine the four lists into a dataframe.
data_tuples = list(zip(mol_cid, canonical_smiles, bioactivity_class, standard_value))
df3 = pd.DataFrame(
    data_tuples,
    columns=['molecule_chembl_id', 'canonical_smiles', 'bioactivity_class', 'standard_value'])
df3

# Alternative (simpler) method: select the columns directly from df2.
selection = ['molecule_chembl_id', 'canonical_smiles', 'standard_value']
df3 = df2[selection]
df3

# Attach the bioactivity class to the dataset.
# BUG FIX: the original concatenated an *unnamed*, freshly-indexed Series
# against df3 (which still carries df2's original row index, so the rows
# misalign and produce NaNs) and then discarded the result entirely, so the
# saved CSV never contained the class column.  Build the Series on df3's own
# index, give it a name, and keep the result.
df3 = pd.concat(
    [df3, pd.Series(bioactivity_class, index=df3.index, name='bioactivity_class')],
    axis=1)

# Save the preprocessed dataframe to CSV.
df3.to_csv('bioactivity_preprocessed_data.csv', index=False)

# ! ls -l

# Copy the preprocessed file to Google Drive as well.
# ! cp bioactivity_preprocessed_data.csv "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics"
# ! ls "/content/gdrive/My Drive/Colab Notebooks/Bioinformatics"

# ---
python/CDD_ML_Part_1_bioactivity_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# CS241, Spring Semester
# Week 12, Prove 12b Milestone - Data Analysis

import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt

# Each row of this file is one player-season of statistics.
players = pd.read_csv('basketball_players.csv')

# 01 -- Mean and median number of points a player scores in a season.
season_mean = players['points'].mean()
season_mean

season_median = players['points'].median()
season_median

# 02 -- Highest point total recorded in a single season, plus who scored
#       those points and in which year.
max_points = players['points'].max()
max_points

master = pd.read_csv('basketball_master.csv')

# Join the per-season stats to the biographical data so we can recover names.
nba = pd.merge(players, master, how='left', left_on='playerID', right_on='bioID')
nba.columns

record_rows = nba[nba.points == max_points]
record_rows

record_rows[['points', 'year', 'firstName', 'middleName', 'lastName']]

# 03 -- Boxplots of the distributions of total points, rebounds, and assists
#       (three boxes on one shared scale).
sns.boxplot(data=nba[['points', 'rebounds', 'assists']])

# 04 -- How scoring has changed over time: median points among all players
#       for each year (x = year, y = median points).
median_by_year = nba[['points', 'year']].groupby('year').median().reset_index()
sns.regplot(data=median_by_year, x='year', y='points')
CS241 w12/prove12b/prove12b.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # =======================================================
# # PART 3: NEXT STEPS
# # =======================================================

# Taking what we have learned from analyzing the kaggle dataset, we decided
# that we needed more information to try to predict profit.  We are applying
# what we learned from our Kaggle analysis to The Numbers dataset.

# Final Project
import csv
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statistics

# To create testing and training dfs and labels
from sklearn.model_selection import train_test_split
# To model the Gaussian Naive Bayes classifier
from sklearn.naive_bayes import GaussianNB
# To calculate the accuracy score of the model
from sklearn.metrics import accuracy_score
# Confusion matrix
from sklearn.metrics import confusion_matrix, classification_report
# For pre-processing to fit all numeric data on the standard scale
from sklearn.preprocessing import StandardScaler
# For applying PCA function on training and testing sets
from sklearn.decomposition import PCA
# Logistic regression
from sklearn.linear_model import LogisticRegression
# SVMs
from sklearn.svm import SVC
# For association rule mining
from apyori import apriori
# Silence warnings
import warnings
warnings.simplefilter("ignore")
# For the confusion matrix heatmap
import seaborn as sns


# ---------------------------------------------------------------------------
# Helper functions used throughout this notebook
# ---------------------------------------------------------------------------

def cat_fun(df, column):
    """Convert df[column] to the pandas 'category' dtype and return it."""
    df[column] = df[column].astype("category")
    return(df[column])


def remove(df, column, object_to_remove):
    """Strip every occurrence of the literal string `object_to_remove` from
    df[column] (in place) and return the cleaned column.

    BUG FIX: pass regex=False.  The default interprets the pattern as a
    regular expression, so removing "$" (a regex end-of-string anchor)
    silently removed nothing on pandas >= 1.4.
    """
    df[column] = df[column].str.replace(object_to_remove, "", regex=False)
    return(df[column])


def quartile_discretize(df, column, categories):
    """Discretize df[column] into its four quartiles, labelled `categories`."""
    df[column] = pd.qcut(df[column], 4, labels=categories)
    return(df[column])


def left_merge_2_conditions(df1, df2, column1, column2):
    """Left-join df2 onto df1 on the pair of key columns."""
    df = pd.merge(df1, df2, how="left", on=[column1, column2])
    return(df)


def groupby_count(df, groupby_column, count_column):
    """Group df by one column and count; return a frame with columns
    ['count', groupby_column] and a fresh RangeIndex."""
    new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
    new_df.columns = ["count"]
    new_df[groupby_column] = new_df.index.get_level_values(0)
    new_df.reset_index(drop=True, inplace=True)
    return(new_df)


def groupby_2_count(df, groupby_column1, groupby_column2, count_column):
    """Two-key variant of groupby_count: columns are
    ['count', groupby_column1, groupby_column2] with a fresh RangeIndex."""
    new_df = pd.DataFrame(df.groupby([groupby_column1, groupby_column2])[count_column].count())
    new_df.columns = ["count"]
    new_df[groupby_column1] = new_df.index.get_level_values(0)
    new_df[groupby_column2] = new_df.index.get_level_values(1)
    new_df.reset_index(drop=True, inplace=True)
    return(new_df)


def bar_graph_count(df, x_column, y_column, title):
    """Bar plot of df[y_column] vs df[x_column] with labelled axes."""
    g = df.plot(x_column, y_column, kind="bar", legend=False, title=title)
    g = plt.ylabel(y_column)
    g = plt.xlabel(x_column)
    return(g)


def exp_moving_avg(d, column_to_be_meaned):
    """Exponential moving average (span=40, more weight on recent data) of
    d[column_to_be_meaned], shifted by one row (a leading 0 is inserted and
    the final value dropped) so each row only reflects *earlier* data.
    The result is also stored on d as 'exp_moving_avg'.
    """
    d["exp_moving_avg"] = d[column_to_be_meaned].ewm(span=40, adjust=False).mean()
    exp_moving_avg = list(d["exp_moving_avg"])
    # Shift by one: prepend 0 for the first entry, drop the last.
    exp_moving_avg = [0] + exp_moving_avg
    exp_moving_avg.pop()
    d["exp_moving_avg"] = exp_moving_avg
    return(exp_moving_avg)


def cumulative_moving_avg(d):
    """Cumulative (expanding-window) moving average of d, shifted by one row
    like exp_moving_avg so the current row is excluded from its own mean."""
    d["moving_avg"] = d.expanding(min_periods=1).mean()
    moving_avg = list(d["moving_avg"])
    cumulative_moving_avg = [0] + moving_avg
    cumulative_moving_avg.pop()
    return(cumulative_moving_avg)


def getting_list_of_entries(df, column_interested_in, column_to_be_meaned):
    """Return [per-(value, released) mean dataframe, list of unique values]
    for the column of interest -- inputs for the moving-average loop."""
    avg_people = pd.DataFrame(df.groupby([column_interested_in, "released"])[column_to_be_meaned].mean())
    column_interested = list(df[column_interested_in].unique())
    return([avg_people, column_interested])


def making_df(people_df, column_interested_in, released, person, cumulative_avg, exp_avg):
    """Assemble one group's moving-average rows into a dataframe."""
    df_2 = pd.DataFrame({column_interested_in: person, "released": released,
                         "cumulative_mean": cumulative_avg, "exp_mean": exp_avg})
    return(df_2)


def calculating_moving_avg(df, column_interested_in, column_to_be_meaned, ty):
    """For each unique value of `column_interested_in`, compute the cumulative
    and exponential moving averages of `column_to_be_meaned` across that
    value's releases (each row sees only earlier releases).

    Returns a dataframe with columns [column_interested_in, 'released',
    ty+'_cumulative_mean_'+column, ty+'_exp_mean_'+column].

    BUG FIX: DataFrame.append was removed in pandas 2.0 -- accumulate with
    pd.concat instead.  The loop-local frame is also renamed so it no longer
    shadows the `df` parameter.
    """
    people_df = pd.DataFrame()
    people = getting_list_of_entries(df, column_interested_in, column_to_be_meaned)
    avg_people = people[0]
    for person in people[1]:
        d = avg_people.groupby(column_interested_in).get_group(person)
        cumulative_avg = cumulative_moving_avg(d)
        exp_avg = exp_moving_avg(d, column_to_be_meaned)
        d.reset_index(inplace=True)
        released = d["released"]
        person_frame = pd.DataFrame({
            column_interested_in: person,
            "released": released,
            ty + "_cumulative_mean_" + column_interested_in: cumulative_avg,
            ty + "_exp_mean_" + column_interested_in: exp_avg})
        people_df = pd.concat([people_df, person_frame])
    return(people_df)


def confusion_matrix_graph(cm, accuracy_label, type_of_df):
    """Heatmap of confusion matrix `cm`, titled with the accuracy score."""
    g = plt.figure(figsize=(2, 2))
    g = sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square=True,
                    cmap='Blues_r', cbar=False)
    g = plt.ylabel('Actual')
    g = plt.xlabel('Predicted')
    all_sample_title = type_of_df + ' Accuracy Score: {0}'.format(round(accuracy_label, 4))
    g = plt.title(all_sample_title, size=12)
    return(g)


# ---------------------------------------------------------------------------
# Load and first-pass clean of the scraped The-Numbers data
# ---------------------------------------------------------------------------
movies = pd.read_csv("V2_TN_reports_dates.csv", encoding="ISO-8859-1")
movies.head()

# Drop the leftover index column from the scrape.
movies.drop("Unnamed: 0", axis=1, inplace=True)
movies.shape  # 1987 movies x 19 columns

# Drop any rows with missing values (16 movies had them).
movies.dropna(inplace=True)
len(movies)
# Drop movies whose budget or domestic box office is reported as "$0".
movies = movies[movies["ProductionBudget"] != "$0"]
len(movies)
# (No movies actually had a 0 budget.)
movies = movies[movies["DomesticBoxOffice"] != "$0"]
len(movies)
# (19 movies were missing domestic box office info.)

# Rename the columns to friendlier snake_case names.
movies.columns
column_names = ["creative_type", "domestic_box_office", "genre",
                "inflated_adj_dom_box_office", "int_box_office", "max_theaters",
                "open_wkend_rev", "open_wkend_theaters", "budget",
                "production_method", "released", "released_ww", "year",
                "year_ww", "source", "distributor", "engagements", "title",
                "world_wide_box_office"]
movies.columns = column_names
movies.head()

movies.dtypes
# Money/count columns arrive as strings containing "$" and "," -- strip both
# characters, then convert the whole group to numeric in one pass.
# (Category/date conversions happen after cleaning so stale category levels
# from removed rows don't linger.)
numeric_columns = ["domestic_box_office", "inflated_adj_dom_box_office",
                   "int_box_office", "max_theaters", "open_wkend_rev",
                   "open_wkend_theaters", "budget", "engagements",
                   "world_wide_box_office"]
dollar_columns = ["domestic_box_office", "inflated_adj_dom_box_office",
                  "int_box_office", "open_wkend_rev", "budget",
                  "world_wide_box_office"]
for col in dollar_columns:
    movies[col] = remove(movies, col, "$")
for col in numeric_columns:
    movies[col] = remove(movies, col, ",")

movies[numeric_columns] = movies[numeric_columns].apply(pd.to_numeric)

# Parse both release-date columns, then split out month and day components in
# case we want to analyze by month, day, or year later.
movies["released"] = pd.to_datetime(movies["released"])
movies["released_ww"] = pd.to_datetime(movies["released_ww"])
movies["month"], movies["day"] = movies["released"].dt.month, movies["released"].dt.day
movies["month_ww"], movies["day_ww"] = movies["released_ww"].dt.month, movies["released_ww"].dt.day
movies.dtypes

# Months become ordered categoricals over 1..12.
cat = list(range(1, 13))
movies["month"] = pd.Categorical(movies["month"], ordered=True, categories=cat)
movies["month_ww"] = pd.Categorical(movies["month_ww"], ordered=True, categories=cat)
movies.month.dtype

# Profit = gross - budget per market, and percent profit = profit/budget*100
# as a normalized way to compare profits across budgets.
for prefix, gross_col in (("dom", "domestic_box_office"),
                          ("int", "int_box_office"),
                          ("ww", "world_wide_box_office")):
    movies[prefix + "_profit"] = movies[gross_col] - movies["budget"]
    movies[prefix + "_percent_profit"] = movies[prefix + "_profit"] / movies["budget"] * 100
movies.head()

# Write the clean version of the df to a csv file (disabled):
# movies.to_csv("clean.csv", index = False)
# Re-read if needed:
# movies = pd.read_csv("clean.csv", encoding = "ISO-8859-1")

# Next: build moving-average columns -- for each creative type (etc.), the
# mean percent profit computed over only the movies released *before* the
# movie in question.
# Moving-average features: for every categorical dimension, compute the
# cumulative and exponential moving averages of percent profit in each market
# (dom / int / ww), then left-join those columns back onto `movies` keyed on
# the dimension value and the release date.
ma_dimensions = ["creative_type", "genre", "production_method", "source",
                 "distributor", "month"]
market_profit_cols = (("dom", "dom_percent_profit"),
                      ("int", "int_percent_profit"),
                      ("ww", "ww_percent_profit"))

ma_frames = []
for dim in ma_dimensions:
    for ty, profit_col in market_profit_cols:
        ma_frames.append((dim, calculating_moving_avg(movies, dim, profit_col, ty)))

for dim, frame in ma_frames:
    movies = left_merge_2_conditions(movies, frame, dim, "released")

movies.head()
movies.shape
movies.columns

# Remove rows whose domestic cumulative means are still 0 (no prior history
# for that dimension value at release time).
for dim in ma_dimensions:
    movies = movies[movies["dom_cumulative_mean_" + dim] != 0]
len(movies)  # 1859 movies remain

# Convert the main descriptive columns to pandas categories.
for col in ["creative_type", "genre", "production_method", "source", "distributor"]:
    movies[col] = cat_fun(movies, col)

# --- Count visualizations ---------------------------------------------------
# Breakdown of movies per genre (sorted descending for the bar chart).
movies_genre = groupby_count(movies, "genre", "genre")
movies_genre.sort_values(['count'], ascending=[False], inplace=True)
bar_graph_count(movies_genre, "genre", "count",
                "Visualization of the Number of Movies per Genre")

movies_ct = groupby_count(movies, "creative_type", "creative_type")
movies_ct.sort_values(['count'], ascending=[False], inplace=True)
bar_graph_count(movies_ct, "creative_type", "count",
                "Visualization of the Number of Movies per Creative Type")

movies_year = groupby_count(movies, "year", "genre")
bar_graph_count(movies_year, "year", "count",
                "Visualization of the Number of Movies per Year")

movies_month = groupby_count(movies, "month", "genre")
bar_graph_count(movies_month, "month", "count",
                "Visualization of the Number of Movies per Month")

movies_source = groupby_count(movies, "source", "genre")
movies_source.sort_values(['count'], ascending=[False], inplace=True)
bar_graph_count(movies_source, "source", "count",
                "Visualization of the Number of Movies per Source")

movies_distributor = groupby_count(movies, "distributor", "genre")
movies_distributor = movies_distributor[movies_distributor["count"] > 0]
movies_distributor.sort_values(['count'], ascending=[False], inplace=True)
bar_graph_count(movies_distributor, "distributor", "count",
                "Visualization of the Number of Movies per Distributor")

movies_production_method = groupby_count(movies, "production_method", "genre")
movies_production_method.sort_values(['count'], ascending=[False], inplace=True)
bar_graph_count(movies_production_method, "production_method", "count",
                "Visualization of the Number of Movies per Production Method")

# --- Discretization ---------------------------------------------------------
movies_discretized = movies.copy()
movies_discretized.columns

# Raw dollar/count columns are binned by quartile.
categories = ["extremely_low", "low", "high", "extremely_high"]
for col in ["domestic_box_office", "inflated_adj_dom_box_office",
            "int_box_office", "max_theaters", "open_wkend_rev",
            "open_wkend_theaters", "budget", "engagements",
            "world_wide_box_office"]:
    movies_discretized[col] = quartile_discretize(movies_discretized, col, categories)
movies_discretized.columns

# Percent-profit columns get fixed (non-quartile) bins instead:
# negative (<0%), low (0-50%), high (50-150%), extremely_high (>150%).
categories = ["negative", "low", "high", "extremely_high"]
percent_bins = [-100, 0, 50, 150, 999999]
for col in ["dom_percent_profit", "int_percent_profit", "ww_percent_profit",
            "dom_cumulative_mean_creative_type", "dom_exp_mean_creative_type",
            "int_cumulative_mean_creative_type", "int_exp_mean_creative_type",
            "ww_cumulative_mean_creative_type", "ww_exp_mean_creative_type"]:
    movies_discretized[col] = pd.cut(movies_discretized[col], percent_bins,
                                     labels=categories)
# All of the cumulative/expanding mean percent-profit columns share the same
# bins and labels, so cut them in one loop instead of one pd.cut per line.
# NOTE: `categories` (["negative", "low", "high", "extremely_high"]) and
# `movies_discretized` are defined in the cells above.
mean_percent_profit_columns = [
    f"{region}_{kind}_mean_{feature}"
    for feature in ["genre", "production_method", "source", "distributor", "month"]
    for region in ["dom", "int", "ww"]
    for kind in ["cumulative", "exp"]
]
for column in mean_percent_profit_columns:
    movies_discretized[column] = pd.cut(movies_discretized[column],
                                        [-100, 0, 50, 150, 999999],
                                        labels = categories)
# -

movies_discretized.dom_profit.describe()

# negative = -999999999 - 0
# low = 0 - 40000000
# high = 40000000 - 79559420
# extremely_high = 79559420 - 9999999999
# BUG FIX: the upper edge was 999999999 (one digit short), which silently turned
# any domestic profit above ~1e9 into NaN; 9999999999 matches the int_profit and
# ww_profit bins below.
movies_discretized["dom_profit"] = pd.cut(movies_discretized["dom_profit"],
                                          [-999999999, 0, 40000000, 79559420, 9999999999],
                                          labels = categories)

movies_discretized.int_profit.describe()

# negative = -999999999 - 0
# low = 0 - 3747336
# high = 3747336 - 49913670
# extremely_high = 49913670 - 9999999999
movies_discretized["int_profit"] = pd.cut(movies_discretized["int_profit"],
                                          [-999999999, 0, 3747336, 49913670, 9999999999],
                                          labels = categories)

movies_discretized.ww_profit.describe()

# negative = -999999999 - 0
# low = 0 - 10000000
# high = 10000000 - 303138900
# extremely_high = 303138900 - 9999999999
movies_discretized["ww_profit"] = pd.cut(movies_discretized["ww_profit"],
                                         [-999999999, 0, 10000000, 303138900, 9999999999],
                                         labels = categories)

# +
# We are setting new categories for the day column by creating a new column for
# week: week_1 is the first 7 days of the month, week_2 is days 8 - 14, week_3 is
# days 15 - 21, and week_4 is the rest of the days.
categories = ["week_1", "week_2", "week_3", "week_4"]
movies_discretized["week"] = pd.cut(movies_discretized["day"], [0, 8, 15, 22, 32], labels = categories)
# -

# Looking at the relationship between genre and percent profit.
# `groupby_2_count` is defined earlier in this notebook.
movies_discretized_genre_pp = groupby_2_count(movies_discretized, "genre", "dom_percent_profit", "genre")
movies_discretized_genre_pp

# Total number of movies in each genre (these sums are the source of the
# hard-coded lookup table below).
movies_discretized_genre_pp.groupby("genre")["count"].sum()

# We ultimately want a column that contains the total count for each genre group,
# so that each (genre, percent-profit) count can be expressed as a percentage of
# its genre. Map each genre name to its total count (printed above) with a single
# dict-based replace() instead of twelve chained calls.
genre_totals = {
    "Action": 377,
    "Adventure": 538,
    "Black Comedy": 9,
    "Comedy": 315,
    "Concert/Performance": 2,
    "Documentary": 2,
    "Drama": 249,
    "Horror": 91,
    "Musical": 30,
    "Romantic Comedy": 76,
    "Thriller/Suspense": 158,
    "Western": 12,
}
movies_discretized_genre_pp["genre_count"] = movies_discretized_genre_pp["genre"].replace(genre_totals)
movies_discretized_genre_pp.head()

movies_discretized_genre_pp["genre_count"] = pd.to_numeric(movies_discretized_genre_pp["genre_count"])

# One step closer: percentage of each genre's movies falling in each
# percent-profit bucket (count / genre_count * 100).
movies_discretized_genre_pp["percent"] = movies_discretized_genre_pp["count"] / movies_discretized_genre_pp["genre_count"] * 100
movies_discretized_genre_pp.head()

'''Attempting to graph this data using a grouped bar chart: formula:
df.pivot(index, columns, values).plot(kind = "type of graph",
color = ["color to use, can be a list of colors"],
title = "you can set the title of your graph here")'''
# Keyword arguments are used for pivot() because positional arguments were
# removed in pandas 2.0.
graph = movies_discretized_genre_pp.pivot(index="genre",
                                          columns="dom_percent_profit",
                                          values="percent").plot(
    kind="bar",
    color = ["crimson", "salmon", "palegreen", "darkgreen"],
    title = "Percent of Percent Profit to Genre Category")

# Changing the y axis label of our graph to Percent
plt.ylabel("Percent")
# Changing the x axis label of our graph to Genre
plt.xlabel("Genre")
# How to change the tick labels (we ended up not needing this, but want to keep for future reference)
# plt.Axes.set_xticklabels(graph, labels = ['extremely low', 'low', 'high', 'extremely high'])
# Moving the legend underneath the graph with 4 columns so the legend forms a
# single straight line, and adding a legend title.
plt.legend(loc = "lower center", bbox_to_anchor = (.5, -.6), ncol = 4,
           title = "Percent Makeup of Genre Category")
assets/all_html/2019_09_04_Final_Project_Ali_Ho_Kendra_Osburn_P3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Offshore Wind Farming # ## Objective and Prerequisites # # In this example, you’ll learn how to solve an offshore wind power generation problem. The goal of the problem is to figure out which underwater cables should be laid to connect an offshore wind farm power network at a minimum cost. We’ll show you how to formulate a mixed-integer programming (MIP) model of this problem using the Gurobi Python API and then find an optimal solution to the problem using the Gurobi Optimizer. # # This modeling example is at the beginner level, where we assume that you know Python and that you have some knowledge about building mathematical optimization models. # # **Download the Repository** <br /> # You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). # ## Motivation # # Global climate change has already had observable effects on the environment. Glaciers have shrunk, ice on rivers and lakes is breaking up earlier than expected, plant and animal species have been affected and trees are flowering sooner than expected. The potential future effects of global climate change include more frequent wildfires, longer periods of drought in some regions and an increase in the number, duration and intensity of tropical storms. [1] # # Climate change mitigation consists of actions to limit the magnitude or rate of global warming and its related effects. # The first challenge for climate change mitigation is eliminating the burning of coal, oil and, eventually, natural gas. This is perhaps the most daunting challenge as denizens of richer nations literally eat, wear, work, play and even sleep on the products made from fossil fuels. 
Also, citizens of developing nations want and arguably deserve the same comforts. There are no perfect solutions for reducing dependence on fossil fuels (for example, carbon neutral biofuels can drive up the price of food and lead to forest destruction, and while nuclear power does not emit greenhouse gases, it does produce radioactive waste). Other alternatives include plant-derived plastics, biodiesel, and wind power. [2] # # Offshore wind power is the use of wind farms constructed in bodies of water, usually in the ocean, to harvest wind energy to generate electricity. Higher wind speeds are available offshore compared to on land, so offshore wind power’s electricity generation is higher per amount of capacity installed. # # The advantage of locating wind turbines offshore is that the wind is much stronger off the coasts, and unlike wind over the continent, offshore breezes can be strong in the afternoon, matching the time when people are using the most electricity. Offshore turbines can also be located close to the load centers along the coasts, such as large cities, eliminating the need for new long-distance transmission lines. # # ## Problem Description # # An offshore wind farm is a collection of wind turbines placed at sea to take advantage of the strong offshore winds. These strong winds produce more electricity, but offshore wind farms are more expensive to install and operate than those on land. # # We will use a MIP model to reduce part of the cost of building an offshore wind farm. We will compute a plan for how to lay the underwater cables that connect the turbines. These cables are necessary to transfer the power produced by the turbines to land. The plan we compute will minimize the cost to install the underwater cables, while ensuring that each turbine is connected to the shore and each cable has sufficient capacity to handle the electrical current generated. # # In our example, a wind farm is being built off the west coast of Denmark. 
There is a power station on the coast where all the electricity must be transferred to be distributed to the electric grid. There are also transfer stations in the wind farm where the power from several turbines can be collected and transferred along a single cable to the shore. # # There are two factors we must consider when installing the cables. First, there is a fixed cost to lay a cable on the sea floor. This cost is proportional to the distance between the two stations the cable connects. Second, we must consider how much current will flow through the cables. Connections that carry large currents need thick cables. Thick cables are more expensive than thin cables. # # The goal of this optimization problem is to decide which cables should be laid to connect the wind farm power network at a minimum cost. # # The model of offshore wind farming optimization problem is an instance of a more general optimization model known as fixed charge network flow problem. Fixed charge network flow problems can be applied to a large number of business problems -for example, in the planning of communication and transport networks. # # # ## Solution Approach # # Mathematical programming is a declarative approach where the modeler formulates a mathematical optimization model that captures the key aspects of a complex decision problem. The Gurobi Optimizer solves such models using state-of-the-art mathematics and computer science. # # A mathematical optimization model has five components, namely: # # * Sets and indices. # * Parameters. # * Decision variables. # * Objective function(s). # * Constraints. # # We now present a MIP formulation for the offshore wind farming problem. # ## Model Formulation # # ### Sets and Indices # # $G(V,E)$: Graph that represents the wind farm network, where $V$ is the set of vertices and $E$ is the set of edges. The turbines, transfer stations, and power stations are vertices in the set of vertices $V$ of the graph. 
The set of potential cables are the edges of the graph.
#
# ### Parameters
#
# $s_{i} \in \mathbb{R}$: Power supplied at vertex $i \in V$. Since turbines supply power, they are source vertices with $s_{i} > 0$. Transfer stations do not supply or remove power from the network, so they have $s_{i} = 0$. The power station on the coast is a sink that removes all power from the wind farm, so it has $s_{i} < 0$.
#
# $u_{i,j} \in \mathbb{R}^+ $: Maximum current capacity a cable can handle from vertex $i \in V$ to vertex $j \in V$.
#
# $c_{i,j} \in \mathbb{R}^+$: Cost per unit of current flow going from vertex $i \in V$ to vertex $j \in V$, i.e. the price we must pay to increase the thickness of the cable to handle an increase in current.
#
# $f_{i,j} \in \mathbb{R}^+$: Fixed cost of laying a cable from vertex $i \in V$ to vertex $j \in V$; it is the result of multiplying the distance between vertices by the cost per mile.
#
# ### Decision Variables
#
# $install_{i,j} \in \{0, 1 \}$: This variable is equal to 1 if we lay a cable from vertex $i \in V$ to vertex $j \in V$; and 0 otherwise.
#
# $flow_{i,j} \geq 0$: This non-negative continuous variable represents the amount of current flowing from vertex $i \in V$ to vertex $j \in V$.
#
# The goal of this optimization model is to decide which of these potential edges in the graph should be used at a minimum cost.
#
# ### Objective Function
#
# - **Total costs**. We want to minimize the total cost to install the cables. The term on the left is the variable cost (i.e. the part that varies according to the current in the cable). The term on the right is the fixed cost to install the cable.
#
# \begin{equation}
# \text{Min} \quad Z = \sum_{(i,j) \in E} c_{i,j} \cdot flow_{i,j} + \sum_{(i,j) \in E} f_{i,j} \cdot install_{i,j}
# \tag{0}
# \end{equation}
#
# ### Constraints
#
# - **Balance**. For each vertex $i \in V$, we want to ensure the conservation of current in the network.
# # \begin{equation} # \sum_{j:(i,j) \in E} flow_{i,j} - \sum_{j:(j,i) \in E} flow_{j,i} = s_{i} \quad \forall i \in V # \tag{1} # \end{equation} # # - **Capacity**. For each edge $(i,j) \in E$, we want to enforce the limits on the maximum current capacity of each cable. # # \begin{equation} # 0 \leq flow_{i,j} \leq u_{i,j} \cdot install_{i,j} \quad \forall (i,j) \in E # \tag{2} # \end{equation} # ## Python Implementation # # This example considers three turbines, one transfer station, and two power stations. The current flowing out at each vertex of the wind farm network is presented in the following table. Recall that since turbines supply power their capacity is positive. Transfer stations do not supply or remove power from the network so they have a capacity of zero. The power stations on the coast are sinks that remove all power from the wind farm network so they have demand of power, in this case we use a negative number. # # | <i></i> | Capacity in MW | # | --- | --- | # | vertex 1 | 4 | # | vertex 2 | 3 | # | vertex 3 | 2 | # | vertex 4 | 0 | # | vertex 5 | -6 | # | vertex 6 | -3 | # # # The capacity, flow cost, and fixed cost of each edge in the wind farm network are provided in the following table. # # | <i></i> | Capacity in MW | Flow cost in millions of Euros | Fixed cost in millions of Euros| # | --- | --- | --- | --- | # | Edge: (0,4) | 4 | 1 | 1 | # | Edge: (0,3) | 2 | 1 | 1 | # | Edge: (1,3) | 3 | 1 | 1 | # | Edge: (2,5) | 2 | 1 | 1 | # | Edge: (3,4) | 2 | 1 | 1 | # | Edge: (3,5) | 1 | 1 | 1 | # # # We now import the Gurobi Python Module. Then, we initialize the data structures with the given data. 
# %pip install gurobipy

# +
import gurobipy as gp
from gurobipy import GRB

# Problem data.
# Net power (MW) injected at each vertex: turbines are positive sources, the
# transfer station is zero, and the coastal power stations are negative sinks.
vertices = {0: 4, 1: 3, 2: 2, 3: 0, 4: -6, 5: -3}

# For each potential cable (edge): [current capacity, flow cost, fixed cost].
edges, cap, flow_cost, fixed_cost = gp.multidict({
    (0, 4): [4, 1, 1],
    (0, 3): [2, 1, 1],
    (1, 3): [3, 1, 1],
    (2, 5): [2, 1, 1],
    (3, 4): [2, 1, 1],
    (3, 5): [1, 1, 1]
})
# -

# ### Model Deployment
#
# We now determine the MIP model for the offshore wind farming problem, by defining the decision variables, constraints, and objective function. Next, we start the optimization process and Gurobi finds the plan to lay cables at the offshore wind farming network that minimizes total costs.

# +
# MIP model formulation
model = gp.Model("offshore_wind_farming")

# Decision variables: whether each cable is installed, and the current it carries.
install = model.addVars(edges, vtype=GRB.BINARY, name="Install")
flow = model.addVars(edges, vtype=GRB.CONTINUOUS, name="Flow")

# Balance: net outflow at every vertex must equal its supply.
model.addConstrs(
    (flow.sum(v, '*') - flow.sum('*', v) == supply for v, supply in vertices.items()),
    name="Flow_conservation")

# Capacity: a cable can carry current (up to its capacity) only if installed.
model.addConstrs((flow[e] <= cap[e] * install[e] for e in edges), name="Install2flow")

# Minimize variable (flow) cost plus fixed installation cost.
model.setObjective(flow.prod(flow_cost) + install.prod(fixed_cost), GRB.MINIMIZE)

model.optimize()
# -

# ## Analysis
#
# The result of the optimization model shows that the minimum total cost value is 17 million Euros. Let's see the solution that achieves that optimal result.

# ### Cable Installation Plan
#
# This plan determines the layout of cables in the offshore wind farming network.

# +
# Display which edges in the offshore wind farming network we plan to install.
for (origin, end), decision in install.items():
    # Binary variables may come back as values like 0.9999, so test against 0.5.
    if abs(decision.x) > 0.5:
        print(f"Install cable from location {origin + 1} to location {end + 1} in the offshore wind farming network ")
# -

# ### Cable Capacity Plan
#
# This plan determines the current flow capacity in MW of each cable installed.

# +
# Current flow capacity of each cable installed.
for (origin, end), current in flow.items():
    if abs(current.x) > 1e-6:
        print(f"The capacity of cable installed from location {origin + 1} to location {end + 1} is {current.x} MW ")
# -

# ## Conclusion
#
# In this example, we addressed an offshore wind farming problem where we want to minimize the cost of laying underwater cables to collect electricity produced by an offshore wind farm network. We learned how to formulate the problem as a MIP model. Also, we learned how to implement the MIP model formulation and solve it using the Gurobi Python API.

# ## References
#
# [1] https://climate.nasa.gov/effects/
#
# [2] https://www.scientificamerican.com/article/10-solutions-for-climate-change/

# Copyright © 2020 Gurobi Optimization, LLC
offshore_wind_farming/offshore_wind_farming_gcl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Load voter info data (tab separated; expected columns include `age2006` and
# `counts`, the number of distinct addresses observed per voter).
voter_info = pd.read_csv('voter_info.csv', sep='\t')

# Age group boundaries: [18, 26), [26, 33), [33, 40), [40, 50), [50, 150).
age_groups = [18, 26, 33, 40, 50, 150]

# Bar graph of the mean number of times moved for each age group.
ages = np.ones([5, 2])
mean_address_counts = np.zeros(5)
for i in range(0, len(age_groups) - 1):
    ages[i, :] = [age_groups[i], age_groups[i + 1]]
    temp = voter_info.loc[(voter_info.age2006 >= ages[i, 0]) & (voter_info.age2006 < ages[i, 1]), 'counts']
    # Zero counts are treated as missing data and excluded from the mean.
    mean_address_counts[i] = np.mean(temp[temp > 0])
mean_address_counts

# voter_info.loc[(voter_info.age2006>=18) & (voter_info.age2006<=24), 'age2006']
mean_address_counts

df = pd.DataFrame({'age_groups': ages[:, 0], 'average_address_counts': mean_address_counts})
df.to_csv('average_address_counts_by_age.csv', sep='\t')

voter_info = voter_info[['age2006', 'counts']]
voter_info.to_csv('all_address_counts_by_age.csv', sep='\t')

ages

# Plot the average address counts for all the age groups.
plt.bar(range(len(mean_address_counts)), mean_address_counts)
plt.xticks(range(len(mean_address_counts)), ('18-25', '26-32', '33-39', '40-49', '50+'))
plt.xlabel('Age Groups')
plt.ylabel('Average Address Counts from 2006-2016')
plt.show()


def _hist_and_poisson_ci(lo, hi=None):
    """Histogram the address counts for voters aged [lo, hi) -- or lo and up
    when `hi` is None -- and print a 95% confidence interval for the mean.

    For a Poisson distribution the mean and the variance are both lambda, so
    the standard error of the estimated mean is sqrt(lambda/n) and the
    95-percent confidence interval is lambda_hat +/- 1.96*sqrt(lambda_hat/n).
    """
    mask = voter_info.age2006 >= lo
    if hi is not None:
        mask &= voter_info.age2006 < hi
    temp = voter_info.loc[mask, 'counts']
    temp = temp[temp > 0]  # zero counts treated as missing, as above
    plt.hist(temp)
    plt.show()
    print(temp.mean() - 1.96 * np.sqrt(temp.mean() / temp.shape[0]))
    print(temp.mean() + 1.96 * np.sqrt(temp.mean() / temp.shape[0]))


# Histograms of the unique address counts for each age group.
# NOTE: the original copy-pasted cells mislabelled these ranges (e.g. "ages
# between 20-30" for the 18-26 selection); the bounds below are the ones the
# code actually used.
_hist_and_poisson_ci(18, 26)
_hist_and_poisson_ci(26, 33)
_hist_and_poisson_ci(33, 40)
_hist_and_poisson_ci(40, 50)
_hist_and_poisson_ci(50)
docs/voter_registration/ each age group.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # Importing/exporting

from matador.query import DBQuery
from matador.hull import QueryConvexHull

# Build the Li-P convex hull: structures within 5% of the hull,
# plane-wave cutoffs in the 300-301 eV window.
kwargs = {'composition': ['LiP'], 'summary': True, 'hull_cutoff': 0.05, 'cutoff': [300, 301]}
hull = QueryConvexHull(**kwargs)

# ## Dump to json files

from json import dump, load

for document in hull.cursor[:5]:
    # Name each JSON file after the structure's .res/.castep source file.
    structure_sources = [src for src in document['source'] if src.endswith(('.res', '.castep'))]
    source_root = structure_sources[0].split('/')[-1]
    # Drop the database ObjectId, which is not JSON-serialisable.
    del document['_id']
    with open(source_root + '.json', 'w') as dump_file:
        dump(document, dump_file)

# ## Load from json files

from glob import glob

json_list = glob('*.json')
hull_cursor = []
for json_path in json_list:
    with open(json_path, 'r') as load_file:
        hull_cursor.append(load(load_file))

hull_cursor[4]
docs/src/notebooks/non-interactive/LiP_import_export_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Machine Learning for Engineers: [XGBoostRegressor](https://www.apmonitor.com/pds/index.php/Main/XGBoostRegressor) # - [XGBoost Regressor](https://www.apmonitor.com/pds/index.php/Main/XGBoostRegressor) # - Source Blocks: 2 # - Description: Introduction to XGBoost for Regression # - [Course Overview](https://apmonitor.com/pds) # - [Course Schedule](https://apmonitor.com/pds/index.php/Main/CourseSchedule) # import xgboost as xgb xgbc = xgb.XGBClassifier() xgbc.fit(XA,yA) yP = xgbc.predict(XB) # + import xgboost as xgb from sklearn.datasets import make_regression from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split X, y = make_regression(n_samples=1000, n_features=10, n_informative=8) Xa,Xb,ya,yb = train_test_split(X, y, test_size=0.2, shuffle=True) xgbr = xgb.XGBRegressor() xgbr.fit(Xa,ya) yp = xgbr.predict(Xb) acc = r2_score(yb,yp) print(acc) xgb.plot_importance(xgbr)
All_Source_Code/XGBoostRegressor/XGBoostRegressor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Import library

import numpy as np
import os
# %matplotlib inline
import matplotlib.pyplot as mp
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as om
import torchvision as tv

# Free any cached GPU memory left over from a previous run.
torch.cuda.empty_cache()

# ## Verify whether CUDA is installed

# +
if torch.cuda.is_available():  # Make sure GPU is available
    dev = torch.device("cuda:0")
    kwar = {'num_workers': 8, 'pin_memory': True}
    cpu = torch.device("cpu")
else:
    print("Warning: CUDA not found, CPU only.")
    dev = torch.device("cpu")
    kwar = {}
    cpu = torch.device("cpu")

# Fixed seed so the random train/validation/test split is reproducible.
np.random.seed(551)
# -

# ## Loading data and creating datasets

# +
dataDir = 'resized'  # The main data directory
# BUG FIX: os.listdir() returns entries in arbitrary, platform-dependent order,
# which made the class-index <-> label mapping (and the image ordering below)
# unstable across runs and machines. Sorting makes both deterministic.
classNames = sorted(os.listdir(dataDir))  # Each type of image has its own subdirectory
numClass = len(classNames)                # Number of types = number of subdirectories
# A nested list of filenames, one inner list per class (sorted for determinism).
imageFiles = [[os.path.join(dataDir, classNames[i], x)
               for x in sorted(os.listdir(os.path.join(dataDir, classNames[i])))]
              for i in range(numClass)]
numEach = [len(imageFiles[i]) for i in range(numClass)]  # A count of each type of image
imageFilesList = []  # An un-nested (flat) list of filenames
imageClass = []      # The labels -- the class index of each image in the list
for i in range(numClass):
    imageFilesList.extend(imageFiles[i])
    imageClass.extend([i] * numEach[i])
numTotal = len(imageClass)  # Total number of images
# Dimensions of the first image; all images are assumed to share this size.
imageWidth, imageHeight = Image.open(imageFilesList[0]).size
print("There are", numTotal, "images in", numClass, "distinct categories")
print("Label names:", classNames)
print("Label counts:", numEach)
print("Image dimensions:", imageWidth, "x", imageHeight)
# -

# Take a random sample of 9 images, then plot and label them.
mp.subplots(3, 3, figsize=(8, 8))
for i, k in enumerate(np.random.randint(numTotal, size=9)):
    im = Image.open(imageFilesList[k])
    arr = np.array(im)
    mp.subplot(3, 3, i + 1)
    mp.xlabel(classNames[imageClass[k]])
    mp.imshow(arr, cmap='gray', vmin=0, vmax=255)
mp.tight_layout()
mp.show()

# ## Transforming data and partitioning into training, validation and testing sets

toTensor = tv.transforms.ToTensor()

def scaleImage(x):
    """Convert a PIL image to a zero-mean tensor with values rescaled to [0, 1].

    Pass a PIL image; returns a tensor.
    """
    y = toTensor(x)
    if (y.min() < y.max()):  # Assuming the image isn't empty, rescale so its values run from 0 to 1
        y = (y - y.min()) / (y.max() - y.min())
    z = y - y.mean()  # Subtract the mean value of the image
    return z

# Load, scale, and stack the image (X) tensor; create the label (Y) tensor.
imageTensor = torch.stack([scaleImage(Image.open(x)) for x in imageFilesList])
classTensor = torch.tensor(imageClass)
print("Rescaled min pixel value = {:1.3}; Max = {:1.3}; Mean = {:1.3}"
      .format(imageTensor.min().item(), imageTensor.max().item(), imageTensor.mean().item()))

# +
validFrac = 0.1  # Fraction of images to move to the validation dataset
testFrac = 0.1   # Fraction of images to move to the test dataset
validList = []
testList = []
trainList = []

for i in range(numTotal):
    rann = np.random.random()  # Randomly assign each image to a split
    if rann < validFrac:
        validList.append(i)
    elif rann < testFrac + validFrac:
        testList.append(i)
    else:
        trainList.append(i)

nTrain = len(trainList)  # Count the number in each set
nValid = len(validList)
nTest = len(testList)
print("Training images =", nTrain, "Validation =", nValid, "Testing =", nTest)
# -

# Slice the big image and label tensors up into training, validation, and
# testing tensors.
trainIds = torch.tensor(trainList)
validIds = torch.tensor(validList)
testIds = torch.tensor(testList)
trainX = imageTensor[trainIds, :, :, :]
trainY = classTensor[trainIds]
validX = imageTensor[validIds, :, :, :]
validY = classTensor[validIds]
testX = imageTensor[testIds, :, :, :]
testY = classTensor[testIds]

# ## Model architecture

class MedNet(nn.Module):
    """Small CNN: two convolutional layers followed by three fully connected layers.

    Pass the image dimensions and the number of labels when initializing.
    """

    def __init__(self, xDim, yDim, numC):
        super(MedNet, self).__init__()
        # Convolutional architecture: numConvs filters of size convSize per layer.
        # Each (unpadded) convolution shrinks the image by convSize - 1 pixels in
        # each dimension; numNodesToFC is the flattened node count remaining after
        # both convolutional layers, fed into the fully connected layers.
        numConvs1 = 20
        convSize1 = 8
        numConvs2 = 20
        convSize2 = 7
        numNodesToFC = numConvs2 * (xDim - (convSize1 - 1) - (convSize2 - 1)) * (yDim - (convSize1 - 1) - (convSize2 - 1))
        # nn.Conv2d(channels in, channels out, convolution height/width).
        # 1 channel -- grayscale -- feeds into the first convolution; the output
        # count of one layer must match the input count of the next.
        self.cnv1 = nn.Conv2d(1, numConvs1, convSize1)
        self.cnv2 = nn.Conv2d(numConvs1, numConvs2, convSize2)
        # Fully connected layer sizes; the final layer outputs one node per label.
        fcSize1 = 400
        fcSize2 = 80
        # nn.Linear(nodes in, nodes out) stores the weights between FC layers.
        self.ful1 = nn.Linear(numNodesToFC, fcSize1)
        self.ful2 = nn.Linear(fcSize1, fcSize2)
        self.ful3 = nn.Linear(fcSize2, numC)

    def forward(self, x):
        """Compute class scores (raw logits) for a batch of images."""
        # elu is a smoother version of the popular relu activation.
        x = F.elu(self.cnv1(x))  # First convolutional layer, then activation
        x = F.elu(self.cnv2(x))  # Second convolutional layer, then activation
        x = x.view(-1, self.num_flat_features(x))  # Flatten for the FC layers
        x = F.elu(self.ful1(x))
        x = F.elu(self.ful2(x))
        # No activation on the final layer: raw logits go to the loss function.
        x = self.ful3(x)
        return x

    def num_flat_features(self, x):
        """Count the individual nodes in a layer (product of non-batch dims)."""
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

# model = MedNet(imageWidth,imageHeight,numClass,numConvs1=20,numConvs2=20,convSize1=8,convSize2=7).to(dev)
model = MedNet(imageWidth, imageHeight, numClass).to(dev)

# ## Training the model

# + code_folding=[]
# NOTE(review): learnRate is never used below -- the Adam optimizer is created
# with its default learning rate; confirm whether 0.03 was intended.
learnRate = 0.03
maxEpochs = 30   # Maximum training epochs
t2vRatio = 1.2   # Maximum allowed ratio of validation to training loss
t2vEpochs = 6    # Consecutive epochs over that ratio before halting
batchSize = 300  # Batch size; going too large will cause an out-of-memory error
trainBats = nTrain // batchSize    # Training batches per epoch (round down to simplify last batch)
validBats = nValid // batchSize    # Validation batches (round down)
testBats = -(-nTest // batchSize)  # Testing batches (round up to include all)

# Class weights for the imbalanced dataset: by making rarer classes count more
# to the loss, we prevent the model from ignoring them.
CEweights = torch.zeros(numClass)
for i in trainY.tolist():
    CEweights[i].add_(1)
CEweights = 1. / CEweights.clamp_(min=1.)
# Weights should be inversely related to count CEweights = (CEweights * numClass / CEweights.sum()).to(dev) # The weights average to 1 opti = om.Adam(model.parameters()) # Initialize an optimizer for i in range(maxEpochs): model.train() # Set model to training mode epochLoss = 0. permute = torch.randperm(nTrain) # Shuffle data to randomize batches trainX = trainX[permute,:,:,:] trainY = trainY[permute] for j in range(trainBats): # Iterate over batches opti.zero_grad() # Zero out gradient accumulated in optimizer batX = trainX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev) # Slice shuffled data into batches batY = trainY[j*batchSize:(j+1)*batchSize].to(dev) # .to(dev) moves these batches to the GPU yOut = model(batX) # Evalute predictions loss = F.cross_entropy(yOut, batY,weight=CEweights) # Compute loss epochLoss += loss.item() # Add loss loss.backward() # Backpropagate loss opti.step() # Update model weights using optimizer validLoss = 0. permute = torch.randperm(nValid) # We go through the exact same steps, without backprop / optimization validX = validX[permute,:,:,:] # in order to evaluate the validation loss validY = validY[permute] model.eval() # Set model to evaluation mode with torch.no_grad(): # Temporarily turn off gradient descent for j in range(validBats): opti.zero_grad() batX = validX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev) batY = validY[j*batchSize:(j+1)*batchSize].to(dev) yOut = model(batX) validLoss += F.cross_entropy(yOut, batY,weight=CEweights).item() epochLoss /= trainBats # Average loss over batches and print validLoss /= validBats print("Epoch = {:-3}; Training loss = {:.4f}; Validation loss = {:.4f}".format(i,epochLoss,validLoss)) if validLoss > t2vRatio * epochLoss: t2vEpochs -= 1 # Test if validation loss exceeds halting threshold if t2vEpochs < 1: print("Validation loss too high; halting to prevent overfitting") break # - confuseMtx = np.zeros((numClass,numClass),dtype=int) # Create empty confusion matrix model.eval() with torch.no_grad(): 
permute = torch.randperm(nTest) # Shuffle test data testX = testX[permute,:,:,:] testY = testY[permute] for j in range(testBats): # Iterate over test batches batX = testX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev) batY = testY[j*batchSize:(j+1)*batchSize].to(dev) yOut = model(batX) # Pass test batch through model pred = yOut.max(1,keepdim=True)[1] # Generate predictions by finding the max Y values for j in torch.cat((batY.view_as(pred), pred),dim=1).tolist(): # Glue together Actual and Predicted to confuseMtx[j[0],j[1]] += 1 # make (row, col) pairs, and increment confusion matrix correct = sum([confuseMtx[i,i] for i in range(numClass)]) # Sum over diagonal elements to count correct predictions print("Correct predictions: ",correct,"of",nTest) print("Confusion Matrix:") print(confuseMtx) print(classNames) # + def scaleBack(x): # Pass a tensor, return a numpy array from 0 to 1 if(x.min() < x.max()): # Assuming the image isn't empty, rescale so its values run from 0 to 1 x = (x - x.min())/(x.max() - x.min()) return x[0].to(cpu).numpy() # Remove channel (grayscale anyway) model.eval() mp.subplots(3,3,figsize=(8,8)) imagesLeft = 9 permute = torch.randperm(nTest) # Shuffle test data testX = testX[permute,:,:,:] testY = testY[permute] for j in range(testBats): # Iterate over test batches batX = testX[j*batchSize:(j+1)*batchSize,:,:,:].to(dev) batY = testY[j*batchSize:(j+1)*batchSize].to(dev) yOut = model(batX) # Pass test batch through model pred = yOut.max(1)[1].tolist() # Generate predictions by finding the max Y values for i, y in enumerate(batY.tolist()): if imagesLeft and y != pred[i]: # Compare the actual y value to the prediction imagesLeft -= 1 mp.subplot(3,3,9-imagesLeft) mp.xlabel(classNames[pred[i]]) # Label image with what the model thinks it is mp.imshow(scaleBack(batX[i]),cmap='gray',vmin=0,vmax=1) mp.tight_layout() mp.show() # - # ### Optimizer from skorch import NeuralNetClassifier # + # device = 'cuda' if torch.cuda.is_available() else 'cpu' # 
torch.manual_seed(0) # net = NeuralNetClassifier(model,optimizer__momentum=0.95, max_epochs=10, lr=0.1, device= device) # + # net.fit(trainX, trainY) # + # # pred # from sklearn.metrics import accuracy_score # y_pred = net.predict(testX) # + # accuracy_score(testY, y_pred) # + # skorch_SGD = om.SGD # skorch_ASGD = om.ASGD # skorch_ADAM = om.Adam # skorch_CrossEntropy = nn.CrossEntropyLoss # skorch_MSELoss = nn.MSELoss # torch.manual_seed(551) # net = NeuralNetClassifier(module=MedNet, # module__xDim=imageWidth, # module__yDim=imageHeight, # module__numC=numClass, # module__numConvs1=20, # module__numConvs2=20, # module__convSize1=8, # module__convSize2=7, # criterion=skorch_CrossEntropy, # optimizer=skorch_ADAM, # optimizer__lr=0.001, # max_epochs=30, # device='cuda') # + #net.get_params().keys() # + # from sklearn.model_selection import GridSearchCV # # params = { # # 'lr': [0.01, 0.02, 0.03, 0.04], # # 'max_epochs': [10, 20, 30, 40], # # 'iterator_train__batch_size': [10, 100, 300, 600], # # 'iterator_valid__batch_size': [10, 100, 300, 600], # # } # params = { # 'lr': [0.1], # # 'module__numConvs1': [5], # # 'module__numConvs2': [10], # # 'max_epochs': [20,30], # # 'optimizer': [skorch_ADAM], # # 'criterion': [skorch_CrossEntropy], # } # # skorch_ASGD skorch_MSELoss, skorch_SGD # gs = GridSearchCV(net, params, refit=False, scoring='accuracy', verbose=3) # gs.fit(trainX, trainY) # - #print(gs.best_score_, gs.best_params_) # + # from sklearn.metrics import accuracy_score # y_pred = net.predict(testX) # + # accuracy_score(testY, y_pred) # + #pwd # + # #ls # + #torch.save(model, 'saved_model_best_last_try') # - # # + # dummy_input = torch.randn(10, 1, 64, 64, device='cuda') # torch.onnx.export(model,dummy_input,"best.onnx", verbose=True) # + code_folding=[] #Change model by GPU(cuda)0 on model for CPU if torch.cuda.is_available(): model = model.cpu() # - permute = torch.randperm(nTest, device='cuda') x = testX[permute,:,:,:] # + torch.onnx.export(model, x, 
"model_opti.onnx", export_params=True, opset_version=10, do_constant_folding=True, input_names = ['input'], output_names = ['output'], dynamic_axes={'input' : {0 : 'batch_size'}, 'output' : {0 : 'batch_size'}}) # -
models/model_opti.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Quick tour of MXNet basics: NDArray creation, the Accuracy metric,
# the log-loss curve, and softmax.

import mxnet as mx
import numpy as np
from mxnet.gluon import nn
from mxnet import metric
from mxnet.gluon import loss
import matplotlib.pyplot as plt

# ?mx.nd.ones

# nd arrays = tensors
# BUG FIX: the legacy NDArray API takes a `ctx` keyword (a Context object),
# not `device`; passing device= raises TypeError.
mx.nd.ones(shape=(1000,), ctx=mx.cpu())

# Accuracy accumulates correct/total counts over successive update() calls.
accuracy = metric.Accuracy()

# ?accuracy.update

labels = mx.nd.array([1,2,3])
predictions = mx.nd.array([1,2,3])
accuracy.update(labels=labels, preds=predictions)
accuracy.get()

# Plot the log loss for a positive example: loss = -log(p).
x = np.arange(0.01,1,0.001)
plt.plot(x, -np.log(x))
plt.title('Log loss when true value label = 1')
plt.xlabel('predicted probability')  # fixed typo: was 'precited probability'
o = plt.ylabel('loss')

# softmax maps raw scores to probabilities that sum to 1.
mx.nd.array([[1,2,3,4,5]]).softmax()

mx.nd.array([[1,2,3,4,5]]).softmax().sum()
mx_net.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ***Subset selection:***

# This notebook demonstrates the use cases for the subset-selection functions in
# the spear library. Subset selection means picking a small subset of unlabeled
# data (or of the data labeled by LFs, in the case of supervised subset
# selection) so that it can be labeled, and then using that small labeled set
# (the L dataset) for effective training of the <b>JL algorithm</b> (the Cage
# algorithm doesn't need labeled data). Finding the best subset makes the best
# use of the labeling effort.

import sys
sys.path.append('../../')
import numpy as np

# ### **Random subset selection**

# Here we select a random subset of instances to label. We pass the number of
# instances available and the number of instances we intend to label, and get
# back a sorted numpy array of indices.

# +
from spear.JL import rand_subset

indices = rand_subset(n_all = 20, n_instances = 5) #select 5 instances from a total of 20 instances
print("indices selected by rand_subset: ", indices)
print("return type of rand_subset: ", type(indices))
# -

# ### **Unsupervised subset selection**

# Here we select an unsupervised subset (for more on this, please refer
# [here](https://arxiv.org/abs/2008.09887)) of instances to label. We need the
# feature matrix (of shape (num_instances, num_features)) and the number of
# instances we intend to label, and we get a sorted numpy array of indices.
# For any other arguments to unsup_subset (or to sup_subset_indices or
# sup_subset_save_files), please refer to the documentation.
# <p>For this let's first get some data (feature matrix), say from
# sms_pickle_U.pkl (in the data_pipeline folder). For more on this pickle file,
# please refer to the other notebook named sms_cage_jl.ipynb</p>

# +
from spear.utils import get_data, get_classes

U_path_pkl = 'data_pipeline/JL/sms_pickle_U.pkl' #unlabelled data - don't have true labels

data_U = get_data(U_path_pkl, check_shapes=True)
x_U = data_U[0] #the feature matrix
print("x_U shape: ", x_U.shape)
print("x_U type: ", type(x_U))
# -

# Now that we have the feature matrix, let's select the indices to label from
# it. After labeling (through a trustable means) those instances, whose indices
# (index with respect to the feature matrix) are given by the following
# function, one can pass them as gold_labels to the PreLabels class in the
# process of labeling the subset-selected data and forming a pickle file.

# +
from spear.JL import unsup_subset

indices = unsup_subset(x_train = x_U, n_unsup = 20)
print("first 10 indices given by unsup_subset: ", indices[:10])
print("return type of unsup_subset: ", type(indices))
# -

# ### **Supervised subset selection**

# Here we select a supervised subset (for more on this, please refer
# [here](https://arxiv.org/abs/2008.09887)) of instances to label. We need
#
# * path to the json file having information about classes
# * path to the pickle file generated from the feature matrix after labeling using LFs
# * number of instances we intend to label
#
# <p>we get a sorted numpy array of indices.</p>
# <p>For this let's use sms_json.json, sms_pickle_U.pkl (in the data_pipeline
# folder). For more on this json/pickle file, please refer to the other
# notebook named sms_cage_jl.ipynb</p>

# +
from spear.JL import sup_subset_indices

U_path_pkl = 'data_pipeline/JL/sms_pickle_U.pkl' #unlabelled data - don't have true labels
path_json = 'data_pipeline/JL/sms_json.json'

indices = sup_subset_indices(path_json = path_json, path_pkl = U_path_pkl, n_sup = 100, qc = 0.85)
print("first 10 indices given by sup_subset: ", indices[:10])
print("return type of sup_subset: ", type(indices))
# -

# Instead of just getting indices into the already-labeled data (stored in
# pickle format, using LFs), we also provide the following utility to split the
# input pickle file and save two pickle files on the basis of subset selection.
# Make sure that path_save_L and path_save_U are <b>EMPTY</b> pickle files. You
# still get the subset-selected indices as the return value.

# +
from spear.JL import sup_subset_save_files

U_path_pkl = 'data_pipeline/JL/sms_pickle_U.pkl' #unlabelled data - don't have true labels
path_json = 'data_pipeline/JL/sms_json.json'
path_save_L = 'data_pipeline/JL/sup_subset_L.pkl'
path_save_U = 'data_pipeline/JL/sup_subset_U.pkl'

indices = sup_subset_save_files(path_json = path_json, path_pkl = U_path_pkl, path_save_L = path_save_L, \
    path_save_U = path_save_U, n_sup = 100, qc = 0.85)
print("first 10 indices given by sup_subset: ", indices[:10])
print("return type of sup_subset: ", type(indices))
# -

# ### **Inserting true labels into pickle files**

# Now, after doing supervised subset selection, say we get two pickle files,
# path_save_L and path_save_U. Say you labeled the instances of path_save_L and
# want to insert those labels into the pickle file. Here, instead of going
# through the process of generating a pickle via PreLabels again, you can use
# the following function to create a new pickle file, which now contains the
# true labels, using the path_save_L pickle file. There is no return value for
# this function. Make sure that path_save, the pickle file path that is to be
# formed with the data in the path_save_L file and the true labels, is
# <b>EMPTY</b>

# +
from spear.JL import insert_true_labels

path_save_L = 'data_pipeline/JL/sup_subset_L.pkl'
path_save_labeled = 'data_pipeline/JL/sup_subset_labeled_L.pkl'

labels = np.random.randint(0,2,[100, 1])
'''
Above is just a random association of labels used for demo. In real time user has to label the instances in path_save_L with a trustable means and use it here. Note that the shape of labels is (num_instances, 1) and just for reference, feature_matrix(the first element in pickle file) in path_save_L has shape (num_instances, num_features).
'''

insert_true_labels(path = path_save_L, path_save = path_save_labeled, labels = labels)
# -

# A similar function to insert_true_labels, called replace_in_pkl, is also made
# available to make changes to a pickle file. replace_in_pkl usage is
# demonstrated below. Note that replace_in_pkl doesn't edit the pickle file;
# instead it creates a new pickle file. Make sure that path_save, the pickle
# file path that is to be formed with the data in the path file and a new numpy
# array, is <b>EMPTY</b>. There is no return value for this function either.
# <p>It is highly advised to use the insert_true_labels function for the
# purpose of inserting labels since it does some other necessary changes.</p>

# +
from spear.JL import replace_in_pkl

path_labeled = 'data_pipeline/JL/sup_subset_labeled_L.pkl' # aka path_save_labeled
path_save_altered = 'data_pipeline/JL/sup_subset_altered_L.pkl'

np_array = np.random.randint(0,2,[100, 1]) #we are just replacing the labels we inserted before
index = 3
'''
index refers to the element we intend to replace. Refer documentaion(specifically spear.utils.data_editor.get_data) to understand which numpy array an index value maps to(order the contents of pickle file from 0 to 8). Index should be in range [0,8].
'''

replace_in_pkl(path = path_labeled, path_save = path_save_altered, np_array = np_array, index = index)
# -

# ### **Demonstrating the use of labeled subset-selected data**

# Now that we have our (labeled) subset in path_save_labeled, let's see a use
# case by calling a member function of the JL class using path_save_labeled as
# our path to the L data.

# +
from spear.JL import JL

n_lfs = 16
n_features = 1024
n_hidden = 512
feature_model = 'nn'
path_json = 'data_pipeline/JL/sms_json.json'

jl = JL(path_json = path_json, n_lfs = n_lfs, n_features = n_features, n_hidden = n_hidden, \
    feature_model = feature_model)

L_path_pkl = path_save_labeled #Labeled data - have true labels
U_path_pkl = path_save_U #unlabelled data - don't have true labels
V_path_pkl = 'data_pipeline/JL/sms_pickle_V.pkl' #validation data - have true labels
T_path_pkl = 'data_pipeline/JL/sms_pickle_T.pkl' #test data - have true labels

log_path_jl_1 = 'log/JL/jl_log_1.txt'
loss_func_mask = [1,1,1,1,1,1,1]
batch_size = 150
lr_fm = 0.0005
lr_gm = 0.01
use_accuracy_score = False

probs_fm, probs_gm = jl.fit_and_predict_proba(path_L = L_path_pkl, path_U = U_path_pkl, path_V = V_path_pkl, \
    path_T = T_path_pkl, loss_func_mask = loss_func_mask, batch_size = batch_size, lr_fm = lr_fm, lr_gm = \
    lr_gm, use_accuracy_score = use_accuracy_score, path_log = log_path_jl_1, return_gm = True, n_epochs = \
    100, start_len = 7,stop_len = 10, is_qt = True, is_qc = True, qt = 0.9, qc = 0.85, metric_avg = 'binary')

labels = np.argmax(probs_fm, 1)
print("probs_fm shape: ", probs_fm.shape)
print("probs_gm shape: ", probs_gm.shape)
notebooks/SMS_SPAM/subset_selection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 ('base')
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np

# Start from a completely empty frame, then build one from a 1-D array:
# with no column labels given, pandas assigns the default integer column 0.
df = pd.DataFrame()

arr = np.array([1, 2, 3, 4, 5])
d_from_arr = pd.DataFrame(arr)
d_from_arr
# -

# Rebuild the same frame, this time naming the single column explicitly.
d_from_arr = pd.DataFrame(arr, columns=["Elements"])
d_from_arr

# A dict of equal-length lists maps each key to one column.
data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
d_from_dict = pd.DataFrame(data)
d_from_dict

# The Index holding the column labels.
d_from_dict.columns
Pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature Scaling

# Data whose attributes take values of very different orders of magnitude can
# negatively affect the performance of machine-learning algorithms such as:
#
# - kNN with Euclidean distance
# - Logistic regression, SVM, Perceptrons and Neural Networks
# - K-means
# - Linear discriminant analysis (LDA), principal component analysis (PCA), kernel PCA.
#
# ![image.png](attachment:image.png)
#
# Algorithms not affected by feature scaling:
# - Naive Bayes
# - Tree-based models such as decision trees, Random Forest and Gradient Boosting
#
# It is therefore often useful to apply transformations so that the features
# end up with similar orders of magnitude — in other words, similar scales.
#
# Two widely used techniques for bringing features to similar scales are:
# - Normalization
# - Standardization
#
# **IMPORTANT:** The fit must be performed on the training set only, but the
# transformation must be applied to both sets, i.e. train and test.

from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import neighbors
from sklearn import linear_model
from sklearn import svm
import pandas as pd
import matplotlib.pyplot as plt

# Load the breast-cancer dataset directly as (features, target) arrays.
X, y = datasets.load_breast_cancer(return_X_y=True)

X.shape, y.shape

# Hold out 30% for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# ## Show min, max, mean and standard deviation of each column of X_train and X_test

df_train = pd.DataFrame(X_train).describe().loc[['min', 'max', 'mean', 'std']]
df_train

df_test = pd.DataFrame(X_test).describe().loc[['min', 'max', 'mean', 'std']]
df_test

# ## Normalization, a.k.a. min-max scaling
#
# - Refers to rescaling features into the interval between 0 and 1.
# - Does not assume the original data follows any specific distribution.
# - Works better when standardization does not work well.
# - Works well when the distribution is not Gaussian or the standard deviation is very small.
# - It is sensitive to outliers, so it is not recommended when the data contains outliers.
#
# $$ x_{i}^{\prime} = \frac{ x_{i} - x_{min} }{ x_{max} - x_{min} } $$

# ![image.png](attachment:image.png)

# Fit the scaler on the training data only, then transform both sets.
sc_norm = preprocessing.MinMaxScaler()
X_train_norm = sc_norm.fit_transform(X_train)
X_test_norm = sc_norm.transform(X_test)

# ## Show min, max, mean and standard deviation of each column of X_train_norm and X_test_norm

df_train_norm = pd.DataFrame(X_train_norm).describe().loc[['min', 'max', 'mean', 'std']]
df_train_norm

df_train[0]

# ## Compare histograms of the first feature: original vs. normalized dataset

import numpy as np

# Synthetic uniform sample illustrating how each scaler reshapes the range
# (min-max and standardization change scale/location, not the shape).
x_ = np.random.uniform(0, 1000, size=(1000, 1))
plt.hist(x_, bins=50);

sc_norm = preprocessing.MinMaxScaler()
x_norm = sc_norm.fit_transform(x_)
plt.hist(x_norm, bins=50);

sc_std = preprocessing.StandardScaler()
x_std = sc_std.fit_transform(x_)
plt.hist(x_std, bins=50);

idx = 0
plt.hist(X_train[:, idx], bins=50);

plt.hist(X_train_norm[:, idx], bins=50);

df_test_norm = pd.DataFrame(X_test_norm).describe().loc[['min', 'max', 'mean', 'std']]
df_test_norm

idx = 0
plt.hist(X_test[:, idx], bins=50);

plt.hist(X_test_norm[:, idx], bins=50);

# ## Standardization
#
# - Centers the data distribution at 0 with standard deviation 1.
# - Assumes your data follows a normal distribution.
# - Best used when the attribute follows a normal distribution, or
# - Produces data without predefined bounds.
#
# $$ x_{i}^{\prime} = \frac{ x_{i} - \mu_x }{ \sigma_x } $$

sc_std = preprocessing.StandardScaler()
X_train_std = sc_std.fit_transform(X_train)
X_test_std = sc_std.transform(X_test)

# ## Show min, max, mean and standard deviation of each column of X_train_std and X_test_std

df_train_std = pd.DataFrame(X_train_std).describe().loc[['min', 'max', 'mean', 'std']]
df_train_std.round(3)

df_test_std = pd.DataFrame(X_test_std).describe().loc[['min', 'max', 'mean', 'std']]
df_test_std.round(3)

# ## Compare histograms of the first feature: original vs. standardized dataset

idx = 0
plt.hist(X_train[:, idx], bins=10);

idx = 0
plt.hist(X_train[:, idx], bins=50);

idx = 0
plt.hist(X_train_std[:, idx], bins=50);

plt.hist(X_test[:, idx], bins=50);

plt.hist(X_test_std[:, idx], bins=50);

# ## Compute the accuracy on the test set (X_test, y_test) using the SVM classifier

model = svm.SVC(gamma='auto')
model.fit(X_train, y_train)
model.score(X_test, y_test)

# ## Compute the accuracy on the normalized test set (X_test_norm, y_test) using the SVM classifier

model_norm = svm.SVC(gamma='auto')
model_norm.fit(X_train_norm, y_train)
model_norm.score(X_test_norm, y_test)

# ## Compute the accuracy on the standardized test set (X_test_std, y_test) using the SVM classifier

model_std = svm.SVC(gamma='auto')
model_std.fit(X_train_std, y_train)
model_std.score(X_test_std, y_test)
2019/06-Tecnicas_para_Melhoria_de_Resultados/1-Escalonamento_de_Features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %pylab inline from jax.scipy.ndimage import map_coordinates from constant import * import warnings from jax import jit, partial, vmap from tqdm import tqdm warnings.filterwarnings("ignore") # ### State # $$x = [w,n,m,s,e,o]$$ # $w$: wealth level size: 20 # $n$: 401k level size: 10 # $m$: mortgage level size: 10 # $s$: economic state size: 8 # $e$: employment state size: 2 # $o$: housing state: size: 2 # # ### Action # $c$: consumption amount size: 20 # $b$: bond investment size: 20 # $k$: stock investment derived from budget constrain once $c$ and $b$ are determined. # $h$: housing consumption size, related to housing status and consumption level # # If $O = 1$, the agent owns a house: # $A = [c, b, k, h=H, action = 1]$ sold the house # $A = [c, b, k, h=H, action = 0]$ keep the house # # If $O = 0$, the agent do not own a house: # $A = [c, b, k, h= \frac{c}{\alpha} \frac{1-\alpha}{pr}, action = 0]$ keep renting the house # $A = [c, b, k, h= \frac{c}{\alpha} \frac{1-\alpha}{pr}, action = 1]$ buy a housing with H unit # # ### Housing # 20% down payment of mortgage, fix mortgage rate, single housing unit available, from age between 20 and 50, agents could choose to buy a house, and could choose to sell the house at any moment. $H = 1000$ # %%time for t in tqdm(range(T_max-1,T_min-1, -1)): if t == T_max-1: v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t]))(Xs) else: v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t+1]))(Xs) Vgrid[:,:,:,:,:,:,t] = v.reshape(dim) cgrid[:,:,:,:,:,:,t] = cbkha[:,0].reshape(dim) bgrid[:,:,:,:,:,:,t] = cbkha[:,1].reshape(dim) kgrid[:,:,:,:,:,:,t] = cbkha[:,2].reshape(dim) hgrid[:,:,:,:,:,:,t] = cbkha[:,3].reshape(dim) agrid[:,:,:,:,:,:,t] = cbkha[:,4].reshape(dim) np.save("Value03",Vgrid)
20210519/housing_force03.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="1YI5Zm7VDTH5" # # Rethinking Statistics course in NumPyro - Week 8 # + [markdown] id="XQXYsrXJDTID" # Lecture 15: Multilevel Models # # - [Video](https://www.youtube.com/watch?v=AALYPv5xSos) # - [Slides](https://speakerdeck.com/rmcelreath/l15-statistical-rethinking-winter-2019) # # Lecture 16: Multilevel Models 2 # # - [Video](https://www.youtube.com/watch?v=ZG3Oe35R5sY) # - [Slides](https://speakerdeck.com/rmcelreath/l16-statistical-rethinking-winter-2019) # # [Proposed problems](https://github.com/gbosquechacon/statrethinking_winter2019/blob/master/homework/week08.pdf) and [solutions in R](https://github.com/gbosquechacon/statrethinking_winter2019/blob/master/homework/week08_solutions.pdf) for the exercises of the week. # + id="HNiX_SQ3DXpP" import pandas as pd from jax import random from jax.scipy.special import expit as logistic import numpyro from numpyro.infer import NUTS, MCMC, Predictive import numpyro.distributions as dist from numpyro.distributions.transforms import OrderedTransform import seaborn as sns import arviz as az # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5159, "status": "ok", "timestamp": 1609361667288, "user": {"displayName": "<NAME>\u00e1rez", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="jOlA5w2QD9Xg" outputId="20208c68-8e97-4d89-c918-58fccbe8e55a" # %load_ext watermark # %watermark -n -u -v -iv -w # - sns.set_style('whitegrid') rng_key = random.PRNGKey(0) # + [markdown] id="vJkV1epwDTIF" # ## Exercise 1 # + [markdown] id="Y7mqJNA1DTIF" # > Revisit the Reed frog survival data, `reedfrogs`, and add the predation and size treatment variables to the varying 
intercepts model. Consider models with either predictor alone, both predictors, as well as a model including their interaction. What do you infer about the causal influence of these predictor variables? Also focus on the inferred variation across tanks (the σ across tanks). Explain why it changes as it does across models with different predictors included. # + [markdown] id="e4_mQ2iwDTIH" # Let's get the data. # + colab={"base_uri": "https://localhost:8080/", "height": 142} executionInfo={"elapsed": 843, "status": "ok", "timestamp": 1609361674428, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="T0JmJamDDTII" outputId="99b55e95-0e0c-471b-b70f-7c0a20078831" d = pd.read_csv('./dat/reedfrogs.csv', header=0, sep=';') d['tank'] = d.index d['pred'] = pd.factorize(d['pred'])[0] d['volume'] = d['size'].replace({'small':0, 'big':1}) d.head(3) # + [markdown] id="4XMOL0jMDTIK" # Now to define a series of models. The first is just the varying intercepts model from the text. 
# + [markdown] id="Drp3P2sPJR47" # Model 1: # + id="m0r8LeVQJCdE" def model(tank, density, surv=None): # priors a_bar = numpyro.sample('a_bar', dist.Normal(0, 1.5)) sigma = numpyro.sample('sigma', dist.Exponential(1)) a = numpyro.sample('a', dist.Normal(a_bar,sigma), sample_shape=tank.shape) # likelihood logits = a[tank] numpyro.sample('surv_hat', dist.Binomial(density, logits=logits), obs=surv) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 15586, "status": "ok", "timestamp": 1609364410322, "user": {"displayName": "Andr\u00<NAME>\u00e1rez", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="gIVH800SLAEP" outputId="bb6262b8-cc19-474a-a417-8e5568db0dd7" kernel = NUTS(model) mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4, chain_method='sequential') dat = {k:v.to_numpy() for k,v in d[['tank', 'density', 'surv']].items()} mcmc.run(rng_key, **dat) # + id="d7fhmoWIfRLQ" samples_11 = az.from_numpyro(mcmc) # + [markdown] id="bxoOaklCeFIG" # Model 2 (pred): # + id="9dgzr_JceHeL" def model(tank, density, pred, surv=None): # priors a_bar = numpyro.sample('a_bar', dist.Normal(0, 1.5)) sigma = numpyro.sample('sigma', dist.Exponential(1)) a = numpyro.sample('a', dist.Normal(a_bar,sigma), sample_shape=tank.shape) bp = numpyro.sample('bp', dist.Normal(-0.5,1)) # likelihood logits = a[tank] + bp*pred numpyro.sample('surv_hat', dist.Binomial(density, logits=logits), obs=surv) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31572, "status": "ok", "timestamp": 1609364427006, "user": {"displayName": "Andr\u00e9<NAME>\u00e1rez", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="j1hkuGEpexVL" outputId="c472fb8a-3e10-4b49-b947-9d558711c7f1" kernel = NUTS(model) mcmc = MCMC(kernel, 
num_warmup=500, num_samples=500, num_chains=4, chain_method='sequential') dat = {k:v.to_numpy() for k,v in d[['tank', 'density', 'pred', 'surv']].items()} mcmc.run(rng_key, **dat) # + id="x8yH16vZfTsH" samples_12 = az.from_numpyro(mcmc) # + [markdown] id="YvEqo4ase-dU" # Model 3 (size): # + id="hVc2skh9fAyL" def model(tank, density, volume, surv=None): # priors a_bar = numpyro.sample('a_bar', dist.Normal(0, 1.5)) sigma = numpyro.sample('sigma', dist.Exponential(1)) a = numpyro.sample('a', dist.Normal(a_bar,sigma), sample_shape=tank.shape) s = numpyro.sample('s', dist.Normal(0,0.5), sample_shape=d.volume.unique().shape) # likelihood logits = a[tank] + s[volume] numpyro.sample('surv_hat', dist.Binomial(density, logits=logits), obs=surv) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 47599, "status": "ok", "timestamp": 1609364443698, "user": {"displayName": "<NAME>\u00e1rez", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="XggMJ9AGfA6v" outputId="96ea18d6-fdc3-4034-9a09-177591fcd8b5" kernel = NUTS(model) mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4, chain_method='sequential') dat = {k:v.to_numpy() for k,v in d[['tank', 'density', 'volume', 'surv']].items()} mcmc.run(rng_key, **dat) # + id="RqrL2ZQJqURk" samples_13 = az.from_numpyro(mcmc) # + [markdown] id="sqoG1za3DTIO" # Model 4 (pred + size): # + id="2oDnyX6hqaZb" def model(tank, density, pred, volume, surv=None): # priors a_bar = numpyro.sample('a_bar', dist.Normal(0, 1.5)) sigma = numpyro.sample('sigma', dist.Exponential(1)) a = numpyro.sample('a', dist.Normal(a_bar,sigma), sample_shape=tank.shape) bp = numpyro.sample('bp', dist.Normal(-0.5,1)) s = numpyro.sample('s', dist.Normal(0,0.5), sample_shape=d.volume.unique().shape) # likelihood logits = a[tank] + bp*pred + s[volume] numpyro.sample('surv_hat', dist.Binomial(density, logits=logits), 
obs=surv) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 64999, "status": "ok", "timestamp": 1609364462062, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="YlICg_fIqtBF" outputId="3a36db76-87e3-47eb-a60d-9670adc4370a" kernel = NUTS(model) mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4, chain_method='sequential') dat = {k:v.to_numpy() for k,v in d[['tank', 'density', 'pred', 'volume', 'surv']].items()} mcmc.run(rng_key, **dat) # + id="X7N47Ammq4gS" samples_14 = az.from_numpyro(mcmc) # + [markdown] id="QUF74m4UDTIP" # Model 5 (pred + size + interaction): # + id="unjwwWC-NHWX" def model(tank, density, pred, volume, surv=None): # priors a_bar = numpyro.sample('a_bar', dist.Normal(0, 1.5)) sigma = numpyro.sample('sigma', dist.Exponential(1)) a = numpyro.sample('a', dist.Normal(a_bar,sigma), sample_shape=tank.shape) bp = numpyro.sample('bp', dist.Normal(-0.5,1), sample_shape=d.volume.unique().shape) s = numpyro.sample('s', dist.Normal(0,0.5), sample_shape=d.volume.unique().shape) # likelihood logits = a[tank] + bp[volume]*pred + s[volume] numpyro.sample('surv_hat', dist.Binomial(density, logits=logits), obs=surv) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 81098, "status": "ok", "timestamp": 1609364480566, "user": {"displayName": "Andr\u00e9<NAME>\u00e1rez", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="rv_mkoHFOd8O" outputId="3284772a-740b-4de3-d86d-d0e127550b2d" kernel = NUTS(model) mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4, chain_method='sequential') dat = {k:v.to_numpy() for k,v in d[['tank', 'density', 'pred', 'volume', 'surv']].items()} mcmc.run(rng_key, **dat) # + id="EzBc3XmEOfzB" samples_15 
= az.from_numpyro(mcmc) # + [markdown] id="XoewvGYeDTIP" # I coded the interaction model (`samples_5`) using a non-centered parameterization. The interaction itself is done by creating a `bp` parameter for each size value. In this way, the effect of `pred` depends upon `volume`. First let's consider the WAIC scores: # - az.compare({'model_11': samples_11, 'model_12': samples_12, 'model_13': samples_13, 'model_14': samples_14, 'model_15': samples_15} , scale='deviance') # + [markdown] id="Vg-9gtDZDTIQ" # These models are really very similar in expected out-of-sample accuracy. The tank variation is huge. But take a look at the posterior distributions for predation and size. You'll see that predation does seem to matter, as you'd expect. Size matters a lot less. So while predation doesn't explain much of the total variation, there is plenty of evidence that it is a real effect. Remember: We don't select a model using WAIC # (or LOO). A predictor can make little difference in total accuracy but still be a real causal effect. # # Let's look at all the sigma posterior distributions: # + colab={"base_uri": "https://localhost:8080/", "height": 241} executionInfo={"elapsed": 1359, "status": "ok", "timestamp": 1609365078554, "user": {"displayName": "<NAME>\u00e1rez", "photoUrl": "https://lh5.googleusercontent.com/-s0kzcIwylzA/AAAAAAAAAAI/AAAAAAAAQXA/v8Sc6WgQy7c/s64/photo.jpg", "userId": "06409440331868776168"}, "user_tz": -60} id="PN0RrIp0lxWW" outputId="f6db69b3-56ee-4d2d-c492-1fa35a4548db" az.plot_forest({ 'model_11': samples_11.posterior['sigma'], 'model_12': samples_12.posterior['sigma'], 'model_13': samples_13.posterior['sigma'], 'model_14': samples_14.posterior['sigma'], 'model_15': samples_15.posterior['sigma'] }, combined=True, figsize=(9, 3)); # + [markdown] id="lG7JJ--0DTIR" # The two models that omit predation, `model_11` and `model_13`, have larger values of `sigma`. This is because predation explains some of the variation among tanks. 
# So when you add it to the model, the variation in the tank intercepts gets
# smaller.

# + [markdown]
# ## Exercise 2

# + [markdown]
# > In 1980, a typical Bengali woman could have 5 or more children in her
# lifetime. By the year 2000, a typical Bengali woman had only 2 or 3. You're
# going to look at a historical set of data, when contraception was widely
# available but many families chose not to use it. These data reside in data
# `bangladesh` and come from the 1988 Bangladesh Fertility Survey. Each row
# is one of 1934 women. There are six variables, but you can focus on two of
# them for this practice problem:
#
# > 1. `district`: ID number of administrative district each woman resided in
# 2. `use.contraception`: An indicator (0/1) of whether the woman was using
# contraception

# +
d = pd.read_csv('./dat/bangladesh.csv', header=0, sep=';')
d['did'] = pd.factorize(d['district'])[0]
d['conc'] = d['use.contraception']
d.head()

# + [markdown]
# Now for the ordinary fixed effect model:

# +
def model(did, conc=None):
    """Fixed-effect model: one independent intercept per district."""
    # priors
    a = numpyro.sample('a', dist.Normal(0, 1.5),
                       sample_shape=d.did.unique().shape)
    # likelihood
    logits = a[did]
    numpyro.sample('conc_hat', dist.Bernoulli(logits=logits), obs=conc)


kernel = NUTS(model)
mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4,
            chain_method='sequential')
mcmc.run(random.PRNGKey(0), d.did.values, d.conc.values)
samples_21 = az.from_numpyro(mcmc)


# +
def model(did, conc=None):
    """Multilevel model: district intercepts partially pooled toward a_bar."""
    # priors
    a_bar = numpyro.sample('a_bar', dist.Normal(0, 1.5))
    sigma = numpyro.sample('sigma', dist.Exponential(1))
    a = numpyro.sample('a', dist.Normal(a_bar, sigma),
                       sample_shape=d.did.unique().shape)
    # likelihood
    logits = a[did]
    numpyro.sample('conc_hat', dist.Bernoulli(logits=logits), obs=conc)


kernel = NUTS(model)
mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4,
            chain_method='sequential')
mcmc.run(rng_key, d.did.values, d.conc.values)
samples_22 = az.from_numpyro(mcmc)

# + [markdown]
# Now let's extract the samples, compute posterior mean probabilities in each
# district, and plot it all:

# +
model_21 = pd.DataFrame(logistic(samples_21.posterior['a'].mean(axis=0).mean(axis=0)))
model_22 = pd.DataFrame(logistic(samples_22.posterior['a'].mean(axis=0).mean(axis=0)))
aux = pd.concat([model_21.assign(model='model_21'),
                 model_22.assign(model='model_22')]
                ).rename(columns={0: 'prob'}).reset_index()

# +
logistic(samples_22.posterior['a_bar'].mean())  # use it for the red line in the next plot

# + [markdown]
# The blue points are the fixed estimations. The _orange ones_ points are the
# varying effects. As you'd expect, they are shrunk towards the mean (the
# _red_ line). Some are shrunk more than others. The third district from the
# left shrunk a lot. Let's look at the sample size in each district:

# +
g = sns.scatterplot(data=aux, x='index', y='prob', hue='model', s=50)
g.set(ylim=(0, 1), xlabel='district', ylabel='prob. use contraception')
g.figure.set_size_inches(12, 2.5)
g.axhline(0.3679, ls='--', c='r');

# + [markdown]
# District 3 has only 2 women sampled. So it shrinks a lot. There are couple
# of other districts, like 49 and 54, that also have very few women sampled.
# But their fixed estimates aren't as extreme, so they don't shrink as much
# as district 3 does.
#
# All of this is explained by partial pooling, of course.

# +
pd.DataFrame(d.district.value_counts().sort_index()).T

# + [markdown]
# ## Exercise 3

# + [markdown]
# Return to the Trolley data, (`Trolley`), from Chapter 12. Define and fit a
# varying intercepts model for these data. By this I mean to add an intercept
# parameter for the individual to the linear model. Cluster the varying
# intercepts on individual participants, as indicated by the unique values in
# the id variable. Include action, intention, and contact as before. Compare
# the varying intercepts model and a model that ignores individuals, using
# both WAIC/LOO and posterior predictions. What is the impact of individual
# variation in these data?
# +
d = pd.read_csv('./dat/Trolley.csv', header=0, sep=';')
elvl = d['edu'].unique()
idx = [7, 0, 6, 4, 2, 1, 3, 5]
cat = pd.Categorical(d.edu, categories=list(elvl[idx]), ordered=True)
d['edu_new'] = pd.factorize(cat, sort=True)[0].astype('int')
d['age_std'] = (d['age'] - d['age'].mean())/d['age'].std()
d['id'] = pd.factorize(d['id'])[0]
d.head(3)

# + [markdown]
# First, let's load the data and re-run the old model from Chapter 12:

# +
def model(action, contact, intention, response=None):
    """Ordered-logit model with action/contact/intention and interactions."""
    # priors
    cutpoints = numpyro.sample(
        'cutpoints',
        dist.TransformedDistribution(dist.Normal(0, 1.5).expand([6]),
                                     OrderedTransform()),
    )
    norm = lambda label: numpyro.sample(label, dist.Normal(0, 0.5))
    bA = norm('bA')
    bC = norm('bC')
    bI = norm('bI')
    bIA = norm('bIA')
    bIC = norm('bIC')
    # likelihood
    BI = bI + bIA*action + bIC*contact
    phi = bA*action + bC*contact + BI*intention
    response_hat = numpyro.sample('response_hat',
                                  dist.OrderedLogistic(phi, cutpoints),
                                  obs=response-1)


# posterior sampling
mcmc = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4,
            chain_method='sequential')
mcmc.run(rng_key, d.action.values, d.contact.values, d.intention.values,
         d.response.values)
mcmc.print_summary()
samples_31 = az.from_numpyro(mcmc)

# + [markdown]
# Now to run the varying intercept model, we need to build a valid individual
# ID variable. The IDs in the data are long tags, so we can coerce them to
# integers in many ways. What is important is that the index values go from 1
# (_zero in python_) to the number of individuals.

# +
def model(action, contact, intention, id, response=None):
    """Same ordered-logit model plus a varying intercept per participant."""
    # priors
    cutpoints = numpyro.sample(
        'cutpoints',
        dist.TransformedDistribution(dist.Normal(0, 1.5).expand([6]),
                                     OrderedTransform()),
    )
    norm = lambda label: numpyro.sample(label, dist.Normal(0, 0.5))
    bA = norm('bA')
    bC = norm('bC')
    bI = norm('bI')
    bIA = norm('bIA')
    bIC = norm('bIC')
    sigma = numpyro.sample('sigma', dist.Exponential(1))
    a = numpyro.sample('a', dist.Normal(0, sigma),
                       sample_shape=d.id.unique().shape)
    # likelihood
    BI = bI + bIA*action + bIC*contact
    phi = a[id] + bA*action + bC*contact + BI*intention
    response_hat = numpyro.sample('response_hat',
                                  dist.OrderedLogistic(phi, cutpoints),
                                  obs=response-1)


# posterior sampling
mcmc = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4,
            chain_method='sequential')
mcmc.run(rng_key, d.action.values, d.contact.values, d.intention.values,
         d.id.values, d.response.values)
samples_32 = az.from_numpyro(mcmc)

# + [markdown]
# We can begin by comparing the posterior distributions. The original
# coefficients are:

# +
az.summary(samples_31, var_names=['bA', 'bC', 'bI', 'bIA', 'bIC'],
           hdi_prob=.89).round(2)

# + [markdown]
# And the new ones, having added the individual IDs, are:

# +
az.summary(samples_32, var_names=['bA', 'bC', 'bI', 'bIA', 'bIC', 'sigma'],
           hdi_prob=.89).round(2)

# + [markdown]
# Everything has gotten more negative. This is because there is a lot of
# individual variation in average rating, look at the distribution for sigma.
# That is on the $logit$ scale, so that's a lot of variation on the
# probability scale. That variation in average rating was hiding some of the
# effect of the treatments. We get more precision by conditioning on
# individual.
#
# The WAIC comparison can also help show how much variation comes from
# individual differences in average rating:

# +
az.compare({'model_31': samples_31, 'model_32': samples_32},
           ic='waic', scale='deviance')

# + [markdown]
# The WAIC difference is massive. This is consistent with individual
# variation in average rating being a major effect in this sample. This is
# all quite typical of likert-scale data, in my experience. Individuals
# anchor on different points and this adds noise. When we have repeat samples
# from the same individual, we can condition away some of that noise and get
# more precise estimates of the treatment effects.
_statrethink_numpyro_w08.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import requests
import json
from pathlib import Path

# %%timeit
# Fetch the "animals" category of the public-APIs index and save it to disk.
url = 'https://api.publicapis.org/entries?category=animals'
response = requests.get(url).text
json_path = Path('animals.json')
# Path.write_text() creates (or truncates) the file itself, so the previous
# json_path.touch() call was redundant and has been removed.
json_path.write_text(response)

# %%timeit
# Quote the URL: an unquoted '?' is a shell glob character (zsh errors out,
# other shells may expand it against matching filenames).
# !curl -s 'https://api.publicapis.org/entries?category=animals' > animals.json
CLI-copy/01-Ins_Flags/Solved/practical.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Creando una superficies non-manifold con mallas de triángulos y lista de nodos master-slaves asociada # + [markdown] slideshow={"slide_type": "slide"} # # Creo 3 patches conformes y luego los uno en un solo archivo # - pip install pyvista # + slideshow={"slide_type": "fragment"} import pyvista as pv import numpy as np n = 7 x = np.linspace(-6, -2, num=n) y = np.linspace(-2, 2, num=n) xx, yy = np.meshgrid(x, y) A, b = 100, 100 zz = -xx-4 points = np.c_[xx.reshape(-1), yy.reshape(-1), zz.reshape(-1)] cloud = pv.PolyData(points) surf = cloud.delaunay_2d() surf.plot(show_edges=True) surf.save("patch_1.vtk", binary=False) #opcional: guardar el patch en un archivo.vtk # + slideshow={"slide_type": "slide"} n = 7 x = np.linspace(-6, 2, num=n+2) y = np.linspace(-2, 2, num=n) xx, yy = np.meshgrid(x, y) A, b = 100, 100 zz = xx*0-2 points = np.c_[xx.reshape(-1), yy.reshape(-1), zz.reshape(-1)] cloud = pv.PolyData(points) surf2 = cloud.delaunay_2d() surf2.plot(show_edges=True) #surf2.save("patch_2.vtk") #opcional: guardar el patch en un archivo.vtk # + slideshow={"slide_type": "slide"} n = 7 x = np.linspace(-2, 2, num=n) y = np.linspace(-2, 2, num=n) xx, yy = np.meshgrid(x, y) A, b = 100, 100 zz = xx points = np.c_[xx.reshape(-1), yy.reshape(-1), zz.reshape(-1)] cloud = pv.PolyData(points) surf3 = cloud.delaunay_2d() surf3.plot(show_edges=True) #surf3.save("patch_3.vtk") #opcional: guardar el patch en un archivo.vtk # + [markdown] slideshow={"slide_type": "slide"} # Ya tengo los 3 patches, ahora toca juntarlos en una sola malla... 
# + slideshow={"slide_type": "fragment"}
# Alternative: sum several patches read back from .vtk files
# mesh = pv.read('patch_1.vtk')+pv.read('patch_2.vtk')+pv.read('patch_3.vtk')
mesh = surf + surf2 + surf3                    # merge all the patches
mesh.save('non_manifold_7.vtk', binary=False)  # save to a .vtk file
mesh.plot(show_edges=True)                     # show a plot

# + [markdown] slideshow={"slide_type": "slide"}
# # Identifying non-manifold nodes
# An array assigns a Master to every node:
# * first the unique nodes are found
# * a unique node is its own master
# * for a repeated node:
#   * the first occurrence is the master
#   * the following ones are slaves, i.e. their master is the first occurrence

# + slideshow={"slide_type": "slide"}
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa  # vtk -> numpy bridge

reader = vtk.vtkPolyDataReader()                 # SOURCE/READER
reader.SetFileName('non_manifold_7.vtk')
reader.Update()
data_vtk = reader.GetOutput()
data = dsa.WrapDataObject(data_vtk)              # numpy-compatible wrapper

N_points = data.GetNumberOfPoints()              # number of nodes
Points = np.array(data.Points)                   # node coordinates

# Vectorized master/slave detection.  np.unique(..., return_index=True)
# yields the index of the FIRST occurrence of each unique row, and
# return_inverse maps every row back to its unique group, so
# index[inverse[i]] is exactly "the first node with the same coordinates
# as node i".  This replaces the original O(n^2) pairwise-comparison loop
# with identical results.
unq, index, inverse = np.unique(Points, axis=0,
                                return_index=True, return_inverse=True)
masters = index[inverse]
is_slave = (masters != np.arange(N_points)).astype(int)

# quick sanity display; the final np.all(...) must be True if the
# masters algorithm works correctly
masters, is_slave, np.sum(is_slave), np.all(Points == Points[masters])

## Alternative !!!!!!
## build the list of repeated indices by subtracting `index` from the full list..
## every element equals its master
## for the elements in the repeated list, search only within that list...
# -

# NOTE(review): `Cells` is not defined at this point in the notebook flow —
# this cell appears to belong to the dedup workflow further below; verify
# before executing top-to-bottom.
Cells = Cells.reshape(int(len(Cells)/4), 4)
surf = pv.PolyData(Points, Cells)
surf.plot(show_edges=True)

# ## Identify boundary "Creases"

bordes = vtk.vtkFeatureEdges()
bordes.SetInputData(data_vtk)
bordes.BoundaryEdgesOn()
bordes.FeatureEdgesOff()
bordes.Update()
b = bordes.GetOutput()
b.GetNumberOfPoints()

is_creases = np.zeros(N_points, dtype=int)  # empty flag array
for i in range(0, N_points):                # for each mesh node i
    p = data_vtk.GetPoint(i)                # its position [x, y, z]
    B_point = b.GetPoint(b.FindPoint(p))    # closest point on the boundary
    val = B_point == p                      # is this node on the boundary?
    is_creases[i] = val                     # 1 if boundary node, 0 otherwise

is_creases, len(is_creases), sum(is_creases)  # creases repeated via master-slave

# Filtering points by their (x, y, z) values to set boundary conditions

# +
from vtk.numpy_interface import dataset_adapter as dsa

data = dsa.WrapDataObject(data_vtk)
puntos = np.array(data.Points)

# points with z == 0: top edges
bound0 = np.array(puntos[:, 2] == 0, dtype=int)
# points with z == -2 and x == -6: lower-left edge
bound1 = np.array((np.array(puntos[:, 2] == -2, dtype=int) +
                   np.array(puntos[:, 0] == -6, dtype=int)) > 1, dtype=int)
# points with z == -2 and x == 2: lower-right edge
bound2 = np.array((np.array(puntos[:, 2] == -2, dtype=int) +
                   np.array(puntos[:, 0] == 2, dtype=int)) > 1, dtype=int)

bound0, bound1, bound2, len(bound0)
# -

# All nodes carrying boundary conditions are saved into a *BNodes.vtk file ???
# # store everything in an is_constrained vector
is_constrained = ((bound0 + bound1 + bound2*2) != 0).astype(int)
is_constrained, sum(is_constrained)

# +
bound = bound0 + bound1 + bound2     # boolean array of every node with a BC
puntosBC = np.zeros([sum(bound), 3])
num = 0
nodes = np.zeros(sum(bound))
Points = vtk.vtkPoints()             # vtkPoints
for i in range(0, len(bound)):
    if bound[i]:
        puntosBC[num, :] = data_vtk.GetPoint(i)
        Points.InsertNextPoint(data_vtk.GetPoint(i))
        nodes[num] = i
        num += 1

puntosBC, puntosBC.shape, nodes

# +
# convert the BC point list to a vtkDataArray
BCs = dsa.numpyTovtkDataArray(puntosBC, name="boundary points")
polydata = vtk.vtkPolyData()         # create a vtkPolyData object
polydata.SetPoints(Points)
NodesId = dsa.numpyTovtkDataArray(nodes, name="NodesId")
polydata.GetPointData().AddArray(NodesId)

writer = vtk.vtkPolyDataWriter()     # PolyDataWriter object
writer.SetFileName('BNodes.vtk')
writer.SetInputData(polydata)        # connect the data
writer.SetFileType(1)                # optional: ascii output
print(writer.Write())                # Write() returns 1 on success, 0 on failure
del(writer)
# -

# !cat BNodes.vtk

# Nodes with z=-2 and x=-6, or x=2 (the base edges) will be displaced, so
# they are collected into the "BDispl" file

# +
displ = bound1 + bound2
dis = np.zeros(b.GetNumberOfPoints())
num = 0
for i in range(0, len(displ)):
    if displ[i]:
        # print(i)
        dis[num] = i
        num += 1
dis = dis[0:num]
dis
ones = np.ones(num, dtype=int)
zeros = np.zeros(num, dtype=int)
Dis = np.zeros([num, 7], dtype=int)
Dis[:, 0] = dis
Dis[:, 1:8] = np.column_stack((ones, ones, ones, zeros, zeros, ones))
Dis
# -

header = str(num) + ' ' + str(7)
np.savetxt('Bdispl.txt', Dis, delimiter=' ', fmt='%u', header=header)

# !cat ejemplo.txt

# Values with $Z=0$ stay fixed, so constraint tags are set there

A = np.zeros([N_points, 3], dtype=int)
A[:, 0] = bound0
A[:, 1] = bound0
A[:, 2] = bound0
A

# + [markdown] slideshow={"slide_type": "slide"}
# The array holding each node's master is a numpy.array, so:
# * convert it to a vtk object
# * hand it to the mesh's PointData
# * write the new file

# + slideshow={"slide_type": "fragment"}
# data_vtk already holds the base file with points and connectivity
Masters = dsa.numpyTovtkDataArray(masters, name="Masters")
data_vtk.GetPointData().SetScalars(Masters)  # attach to PointData (NOT the numpy wrapper)
Is_slave = dsa.numpyTovtkDataArray(is_slave, name="Is_slave")
data_vtk.GetPointData().AddArray(Is_slave)
Is_creases = dsa.numpyTovtkDataArray(is_creases, name="CreasesId")
data_vtk.GetPointData().SetScalars(Is_creases)
# use AddArray() for additional attributes, see
# http://vtk.1045678.n5.nabble.com/Multiple-fields-scalars-vectors-td1224224.html
constraint = dsa.numpyTovtkDataArray(A, name="ConstraintTags")
data_vtk.GetPointData().AddArray(constraint)
writer = vtk.vtkPolyDataWriter()             # PolyDataWriter object
writer.SetFileName('non_manifold_masters.vtk')  # output file name
writer.SetInputData(data_vtk)                   # connect the data
writer.SetFileType(1)                           # optional: ascii output
print(writer.Write())                           # 1 on success, 0 on failure
del(writer)

# + [markdown] slideshow={"slide_type": "slide"}
# Checking the saved file content shows the added Point Data attributes

# + slideshow={"slide_type": "fragment"}
# !head -n 10 non_manifold_masters.vtk && echo '\n...\n' && tail -n 5 non_manifold_masters.vtk
# -

bordes = vtk.vtkFeatureEdges()
bordes.SetInputData(data)
bordes.BoundaryEdgesOn()
bordes.FeatureEdgesOn()
bordes.ManifoldEdgesOn()
bordes.NonManifoldEdgesOff()
bordes.Update()

# # Fixing NON-MANIFOLD connectivity without Master-Slave


def _merge_slave_nodes(points, cells, masters, is_slave):
    """Delete slave nodes and rewire flat triangle connectivity [3,a,b,c,...]
    to the corresponding master nodes.

    Returns the updated (points, cells, masters, is_slave).  Factored out
    because the exact same in-place loop appeared twice below (simple
    example and integrated example).
    """
    n = len(points)
    i = 0
    for _ in range(0, n):
        if n < 1:                               # no nodes left to inspect
            break
        if is_slave[i]:
            points = np.delete(points, i, 0)    # drop the slave's coordinates
            cells[cells == i] = masters[i]      # rewire connectivity to its master
            cells[cells > i] -= 1               # shift ids above the removed node
            # masters that point above i also shift down by one
            masters[np.where(masters[i:] > i)[0] + i] -= 1
            is_slave = np.delete(is_slave, i)
            masters = np.delete(masters, i)
            i -= 1                              # stay on this index: it now holds a new node
            n -= 1                              # one node fewer
        i += 1
    return points, cells, masters, is_slave


# +
import pyvista as pv
import numpy as np

Points = np.array([[0, 1, 0], [1, 1, 0],
                   [0, 0, 0], [1, 0, 0],
                   [0, 0, 0], [1, 0, 0],
                   [0, -1, 0], [1, -1, 0]])
is_slave = np.array([0, 0, 0, 0, 1, 1, 0, 0])
masters = np.array([0, 1, 2, 3, 2, 3, 6, 7])
N_points = len(Points)

# mesh faces
Cells = np.hstack([[3, 2, 1, 0],
                   [3, 2, 3, 1],
                   [3, 6, 5, 4],
                   [3, 6, 7, 5]])
surf = pv.PolyData(Points, Cells)
surf.plot(show_edges=True)
surf.save("simple.vtk", binary=False)

# +
# Fix NON-MANIFOLD connectivity without Master-Slave
Points, Cells, masters, is_slave = _merge_slave_nodes(Points, Cells,
                                                      masters, is_slave)
Cells = Cells.reshape(int(len(Cells)/4), 4)
surf = pv.PolyData(Points, Cells)
surf.plot(show_edges=True)
surf.save("simple.vtk", binary=False)
# -

# ## Integrated example for a Non-Manifold mesh without Master-Slave

# +
import pyvista as pv
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa  # vtk -> numpy bridge

reader = vtk.vtkPolyDataReader()                 # SOURCE/READER
reader.SetFileName('non_manifold_T_9x9.vtk')
reader.Update()
data_vtk = reader.GetOutput()
data = dsa.WrapDataObject(data_vtk)

N_points = data.GetNumberOfPoints()              # number of nodes
Points = np.array(data.Points)
Cells = np.array(data.Polygons)

# Vectorized master/slave detection: return_index gives the FIRST
# occurrence of each unique row; return_inverse maps every row to its
# group.  Equivalent to the original O(n^2) comparison loop.
unq, index, inverse = np.unique(Points, axis=0,
                                return_index=True, return_inverse=True)
masters = index[inverse]
is_slave = (masters != np.arange(N_points)).astype(int)

# Fix NON-MANIFOLD connectivity without Master-Slave =========================
Points, Cells, masters, is_slave = _merge_slave_nodes(Points, Cells,
                                                      masters, is_slave)
Cells = Cells.reshape(int(len(Cells)/4), 4)
surf = pv.PolyData(Points, Cells)
surf.plot(show_edges=True)
surf.save('nonManifold_T_3x9x9_noMaster.vtk', binary=False)
# -

import numpy as np


def find_points_where(lista_puntos, x=None, y=None, z=None, tol=1e-8):
    """Return indices of the points whose given coordinate(s) match the
    requested value(s) within `tol`.

    Parameters
    ----------
    lista_puntos : (n, 3) array of point coordinates.
    x, y, z : target value per axis; axes left as None are ignored.
    tol : absolute tolerance for the comparison.

    Returns
    -------
    The np.where tuple when a single axis is given, an index array when
    several axes are combined, or None when no axis is given — the same
    forms the original eight-branch if-chain produced.  The chain compared
    with `x != None`; this version composes per-axis matches and uses the
    idiomatic `is not None` instead.
    """
    matches = [np.where(np.abs(lista_puntos[:, axis] - value) < tol)
               for axis, value in enumerate((x, y, z))
               if value is not None]
    if not matches:
        return None                 # original fell through and returned None
    if len(matches) == 1:
        return matches[0]           # keep the np.where tuple form
    result = np.intersect1d(matches[0], matches[1])
    for extra in matches[2:]:
        result = np.intersect1d(result, extra)
    return result


# +
import pyvista as pv

plotter = pv.Plotter()
plotter.add_mesh(surf, show_edges=True, color="white")

# Add labels to points on the yz plane (where x == 0)
puntos = surf.points
c = find_points_where(puntos, x=.5, y=0, z=0)
cd = find_points_where(puntos, x=.5, y=1, z=0)
ci = find_points_where(puntos, x=.5, y=-1, z=0)
b1 = find_points_where(puntos, x=0, y=0, z=-1)
b2 = find_points_where(puntos, x=1, y=0, z=-1)
D = find_points_where(puntos, y=1, z=0)
I = find_points_where(puntos, y=-1, z=0)

constraints = np.zeros([len(puntos), 3])
constraints[D, 2] = 1
constraints[I, 2] = 1
constraints[ci, 0] = 1
constraints[cd, 0] = 1
constraints[b1, 1] = 1
constraints[b2, 1] = 1
constraints[c] = [1, 1, 0]

mask = np.concatenate((c, cd, ci, b2, b1), axis=0)
plotter.add_point_labels(puntos[mask], puntos[mask].tolist(),
                         point_size=30, font_size=0)
plotter.show()
# -

# # Identify Creases and constraint tags

# +
import numpy as np
import vtk  # install version 8 with: $ pip install vtk==8.1.2
from vtk.numpy_interface import dataset_adapter as dsa

# Load the Non-Manifold .vtk file without Master-Slave
reader = vtk.vtkPolyDataReader()
reader.SetFileName('nonManifold_T_3x9x9_noMaster.vtk')
reader.Update()
data_vtk = reader.GetOutput()
data = dsa.WrapDataObject(data_vtk)
puntos = np.array(data.Points)
N_points = data.GetNumberOfPoints()

# Identify creases ==========================================================
bordes = vtk.vtkFeatureEdges()
bordes.SetInputData(data_vtk)
bordes.BoundaryEdgesOn()
bordes.FeatureEdgesOff()
bordes.Update()
b = bordes.GetOutput()
b.GetNumberOfPoints()
is_creases = np.zeros(N_points, dtype=int)  # empty flag array
for i in range(0, N_points):                # for each mesh node i
    p = data_vtk.GetPoint(i)                # its position [x, y, z]
    B_point = b.GetPoint(b.FindPoint(p))    # closest point on the boundary
    val = B_point == p                      # is this node on the boundary?
    is_creases[i] = val

# Identify constraint tags ==================================================
c = find_points_where(puntos, x=.5, y=0, z=0)
cd = find_points_where(puntos, x=.5, y=1, z=0)
ci = find_points_where(puntos, x=.5, y=-1, z=0)
b1 = find_points_where(puntos, x=0, y=0, z=-1)
b2 = find_points_where(puntos, x=1, y=0, z=-1)
D = find_points_where(puntos, y=1, z=0)
I = find_points_where(puntos, y=-1, z=0)
constraints = np.zeros([len(puntos), 3])
constraints[c, 0] = 1
constraints[c, 1] = 1
constraints[D, 2] = 1
constraints[I, 2] = 1
constraints[ci, 0] = 1
constraints[cd, 0] = 1
constraints[b1, 1] = 1
constraints[b2, 1] = 1
constraints[c] = [1, 1, 0]

# Creases with corner=2 ???????????????????????????????????????????????????
c1 = find_points_where(puntos, x=0, y=1, z=0)
c2 = find_points_where(puntos, x=1, y=1, z=0)
c3 = find_points_where(puntos, x=0, y=-1, z=0)
c4 = find_points_where(puntos, x=1, y=-1, z=0)
b3 = find_points_where(puntos, x=0, y=0, z=0)
b4 = find_points_where(puntos, x=1, y=0, z=0)
is_creases[np.array([c1, c2, c3, c4, b1, b2, b3, b4])] = 2

# Save a new .vtk file ======================================================
Is_creases = dsa.numpyTovtkDataArray(is_creases, name="CreasesId")
data_vtk.GetPointData().SetScalars(Is_creases)
# use AddArray() for additional attributes, see
# http://vtk.1045678.n5.nabble.com/Multiple-fields-scalars-vectors-td1224224.html
Constraints = dsa.numpyTovtkDataArray(constraints, name="ConstraintTags")
data_vtk.GetPointData().AddArray(Constraints)
# data_vtk.GetPointData().
writer = vtk.vtkPolyDataWriter()
writer.SetFileName('non_manifold_full.vtk')
writer.SetInputData(data_vtk)
writer.SetFileType(1)            # optional: ascii output
if writer.Write():               # Write() returns 1 on success, 0 on failure
    print("Archivo vtk guardado")
del(writer)

# Boundary displacement file ================================================
base = find_points_where(puntos, y=0, z=-1)
n_displ = len(base)
dis = np.zeros([n_displ, 7], dtype=int)
dis[:, 0] = base
dis[:, 3] = 1
dis[:, 6] = 1
header = str(n_displ) + ' ' + str(7)
np.savetxt('BDispl.pts', dis, delimiter=' ', fmt='%u', header=header)
# -

# # Nodes with imposed displacement (Bdispl)
non_Manifold.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# # !pip install pydash

# +
from pymongo import MongoClient
from functools import cmp_to_key
from pathlib import Path
import imagehash
from tqdm.notebook import tqdm
import pydash as _
import numpy as np
import json
import shelve
from IPython.display import display, Image
from ipywidgets import widgets, HBox, VBox, Box, Layout

from lib.parallel import parallel
from lib.sort_things import sort_images, simple_sort_images
from lib.image_dedup import make_hashes, hashes_diff, is_duplicated
from lib.PersistentSet import PersistentSet
# -

# MongoDB collections for the bad-vis dataset
mongo_uri = json.load(open('./credentials/mongodb_credentials.json'))['uri']
mongo = MongoClient(mongo_uri)
db = mongo['bad-vis']
posts = db['posts']
imagefiles = db['imagefiles']
imagevalidfiles = db['imagevalidfiles']
imagemeta = db['imagemeta']

images_dir = Path('../images')

# Hand-curated metadata: chosen main images plus known-invalid posts/images
main_image_phashes = shelve.open('handmade/main_image_phashes')
invalid_post_ids = set(json.load(open('handmade/invalid_post_ids.json')))
invalid_image_phashes = set(json.load(open('handmade/invalid_image_phashes.json')))
duplicated_image_phash_pairs = PersistentSet.load_set('handmade/duplicated_image_phash_pairs.json')

# rebuild both derived collections from scratch
imagevalidfiles.drop()
imagemeta.drop()

# discard reddit preview if (Imgur) albums or manual downloaded images exist
discard_preview_post_ids = {f['post_id']
                            for f in [f for f in imagefiles.find({'index_in_album': {'$ne': 0}})]}
len(discard_preview_post_ids)

# keep only files that pass every validity filter
for f in tqdm(imagefiles.find()):
    if f['post_id'] in invalid_post_ids:
        # f['invalid'] = 'invalid_post'
        continue
    if f['phash'] in invalid_image_phashes:
        # f['invalid'] = 'invalid_image'
        continue
    if f['post_id'] in discard_preview_post_ids and f['index_in_album'] == 0:
        # f['invalid'] = 'overridden'
        continue
    imagevalidfiles.insert_one(f)

# # Group image files into images
class ImageMeta():
    """Aggregate the per-file records of one image into a single metadata record.

    One image may exist as several downloaded files (thumbnail, preview,
    external link, archive, manual download). This class picks a "main" file
    (manual curation via `main_image_phashes` wins, otherwise the best type in
    `image_order`) and exposes its fields as attributes.
    """

    # Fields exported by digest(); must stay in sync with the imagemeta schema.
    _attrs = [
        'id', 'post_id', 'image_id', 'short_image_id', 'album', 'index_in_album',
        'image_type', 'file_path', 'source_platform', 'source', 'ext', 'animated',
        'size', 'width', 'height', 'pixels', 'thumbnail', 'preview',
        'external_link', 'archive', 'manual', 'ahash', 'phash', 'pshash',
        'dhash', 'whash', 'related_images', 'image_order',
    ]

    def __init__(self, imageFiles=None):
        """imageFiles: non-empty list of image-file dicts for ONE image_id.

        Raises if the list is missing or empty.
        """
        # Fixed: the original used a mutable default argument (imageFiles=[]).
        if not imageFiles:
            raise Exception('Empty imageFiles array.')
        self._imageFiles = imageFiles
        self.image_id = imageFiles[0]['image_id']
        # Preference order over the available file types, best first.
        self.image_order = [i['image_type'] for i in sort_images(
            [getattr(self, a) for a in self.available_image_types])]
        # Expose the main file's fields directly on the instance.
        # NOTE(review): assumes main_image keys never collide with the
        # read-only properties defined below — confirm against the file schema.
        for k, v in self.main_image.items():
            setattr(self, k, v)

    def digest(self):
        """Return a plain dict of `_attrs`, refreshed from the current main image."""
        for k, v in self.main_image.items():
            setattr(self, k, v)
        return {a: getattr(self, a) for a in ImageMeta._attrs}

    @property
    def is_hash_consistent(self):
        # Only phash is checked; the multi-hash variant is kept for reference.
        return _.every([self.hash_consistent(h) for h in ['phash']])
        # return _.every([self.hash_consistent(h) for h in ['ahash', 'phash', 'dhash', 'whash']])

    def hash_distance(self, hash_type):
        """Hamming distances of each non-thumbnail file's hash from the main file's."""
        main_hash = imagehash.hex_to_hash(self.main_image[hash_type])
        image_types = [t for t in self.image_order
                       if t != 'thumbnail' and t != 'thumbnail_alt']
        return [imagehash.hex_to_hash(self.find_image_type(t)[hash_type]) - main_hash
                for t in image_types]

    def hash_consistent(self, hash_type):
        """True when every file's hash is close to the main file's."""
        main_hash = imagehash.hex_to_hash(self.main_image[hash_type])
        # magic number threshold, by experiment when trying out the imagehash library
        return _.every(self.hash_distance(hash_type), lambda x: abs(x) < 5)

    @property
    def main_image(self):
        """The manually curated file (matched by phash) if any, else the best-ranked one."""
        if self.main_image_phash:
            images = [getattr(self, a) for a in self.image_order]
            image = [i for i in images if i['phash'] == self.main_image_phash]
            if len(image) > 0:
                return image[0]
        return getattr(self, self.image_order[0])

    @property
    def main_image_phash(self):
        # Manual curation lookup; None when this image was never hand-picked.
        return main_image_phashes.get(self.image_id, None)

    @property
    def related_images(self):
        """All image_ids sharing this image's post (includes self)."""
        return list({f['image_id'] for f in
                     imagefiles.find({'post_id': self.post_id}, {'image_id': 1})})

    # One accessor per known file type; None when that type was not downloaded.
    @property
    def thumbnail(self):
        return self.find_image_type('thumbnail')

    @property
    def thumbnail_alt(self):
        return self.find_image_type('thumbnail_alt')

    @property
    def preview(self):
        return self.find_image_type('preview')

    @property
    def preview_alt(self):
        return self.find_image_type('preview_alt')

    @property
    def external_link(self):
        return self.find_image_type('external_link')

    @property
    def external_link_alt(self):
        return self.find_image_type('external_link_alt')

    @property
    def archive(self):
        return self.find_image_type('archive')

    @property
    def manual(self):
        return self.find_image_type('manual')

    def find_image_type(self, image_type):
        """First file record of the given type, or None."""
        return _.find(self._imageFiles, lambda x: x['image_type'] == image_type)

    @property
    def available_image_types(self):
        """The subset of known types actually present, in rough quality order."""
        return [image_type for image_type in
                ['manual', 'archive', 'external_link', 'external_link_alt',
                 'preview', 'preview_alt', 'thumbnail', 'thumbnail_alt']
                if getattr(self, image_type)]


def make_imageMeta(image_id):
    """Build and upsert the ImageMeta record for one image_id.

    Returns the ImageMeta, or None when the image is skipped (invalid post,
    missing post, or no valid files).
    """
    imageFiles = [i for i in imagevalidfiles.find({'image_id': image_id})]
    # Guard added: an image_id with zero files previously raised IndexError.
    if not imageFiles:
        return
    if imageFiles[0]['post_id'] in invalid_post_ids:
        return
    if not posts.find_one({'post_id': imageFiles[0]['post_id']}, {'_id': 1}):
        print(f"Cannot find corresponding post: {imageFiles[0]['post_id']}")
        return
    valid_imageFiles = [i for i in imageFiles if i['phash'] not in invalid_image_phashes]
    if len(valid_imageFiles) == 0:
        print(f"All image files are invalid: {image_id}")
        return
    # Drop Mongo's _id so the dicts can be re-inserted into another collection.
    for i in valid_imageFiles:
        del i['_id']
    imageMeta = ImageMeta(valid_imageFiles)
    imagemeta.replace_one({'image_id': imageMeta.image_id}, imageMeta.digest(), upsert=True)
    return imageMeta


imageMetas = parallel(make_imageMeta,
                      {f['image_id'] for f in imagevalidfiles.find({}, {'image_id': 1})})
# imageMetas = parallel(make_imageMeta, {f['image_id'] for f in imagefiles.find({'source_platform': 'reddit'}, {'image_id': 1})})

# # Find images with inconsistent hash

# +
def make_link(url, text):
    """An ipywidgets hyperlink opening in a new tab."""
    return widgets.HTML(value=f"<a href='{url}' target='_blank'>{text}</a>")


def make_main_image_box(imageMeta):
    """Interactive review row: post link + log area + one card per file.

    Each card has a Select button that records that file's phash as the
    manually-chosen main image and re-upserts the metadata record.
    """
    layoutArgs = {'padding': '10px', 'margin': '5px', 'border': '3px solid lightblue'}
    output = widgets.Output()

    def select(phash):
        # Persist the manual choice and log old/new values into the output area.
        with output:
            print(f"-{main_image_phashes.get(imageMeta.image_id, '')}")
            main_image_phashes[imageMeta.image_id] = phash
            imagemeta.replace_one({'image_id': imageMeta.image_id},
                                  imageMeta.digest(), upsert=True)
            print(f"+{phash}")
            print(f"+: {imageMeta.main_image['image_type']}")

    def makeSelectBtn(imageFile):
        btn = widgets.Button(description="Select", button_style='')
        btn.on_click(lambda b: select(imageFile['phash']))
        return btn

    def makeImageBox(imageFile):
        # One card: type/ext, dimensions/size, phash, the image, Select button.
        return VBox([
            widgets.Label(value=f"{imageFile['image_type']} {imageFile['ext']}"),
            widgets.Label(value=f"{imageFile['width']} {imageFile['height']} {imageFile['size']}"),
            widgets.Label(value=f"{imageFile['phash']}"),
            widgets.Image(value=open(imageFile['file_path'], 'rb').read(), width=200),
            makeSelectBtn(imageFile)
        ])

    with output:
        print(f"manual: {main_image_phashes.get(imageMeta.image_id, '')}")
        print(f"phash: {imageMeta.phash}")
        print(f"image_type: {imageMeta.image_type}")

    return HBox(
        [VBox([make_link(f"https://www.reddit.com/r/{imageMeta.source}/comments/{imageMeta.id}",
                         imageMeta.image_id),
               output], layout=Layout(**layoutArgs))] +
        [makeImageBox(imageMeta.find_image_type(i)) for i in imageMeta.image_order])
# -

# Show up to 10 images whose file hashes disagree and that have not been
# manually resolved yet (skip manual downloads — those are trusted).
cnt = 0
for imageMeta in tqdm(imageMetas):
    if not imageMeta:
        continue
    if not imageMeta.is_hash_consistent and imageMeta.image_order[0] != 'manual' and not imageMeta.main_image_phash:
        # if not imageMeta.is_hash_consistent and imageMeta.image_order[0] != 'manual':
        display(make_main_image_box(imageMeta))
        cnt += 1
        if cnt >= 10:
            break
notebooks/images_meta.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# First-visit Monte Carlo control for Blackjack on a custom Gym environment.

# +
import matplotlib.pyplot as plt
import pandas as pd
import random
import numpy as np

# +
import enum

# Blackjack card values. Face cards count 10; the ace is worth 1 or 11.
ranks = {
    "two": 2, "three": 3, "four": 4, "five": 5, "six": 6, "seven": 7,
    "eight": 8, "nine": 9, "ten": 10, "jack": 10, "queen": 10, "king": 10,
    "ace": (1, 11),
}


class Suit(enum.Enum):
    spades = "spades"
    clubs = "clubs"
    diamonds = "diamonds"
    hearts = "hearts"


# +
class Card:
    """One playing card: suit, rank name, and blackjack value(s)."""

    def __init__(self, suit, rank, value):
        self.suit = suit
        self.rank = rank
        self.value = value

    def __str__(self):
        return self.rank + " of " + self.suit.value


class Deck:
    """A shoe made of `num` standard 52-card decks."""

    def __init__(self, num=1):
        self.cards = []
        for i in range(num):
            for suit in Suit:
                for rank, value in ranks.items():
                    self.cards.append(Card(suit, rank, value))

    def shuffle(self):
        random.shuffle(self.cards)

    def deal(self):
        """Remove and return the top card."""
        return self.cards.pop(0)

    def peek(self):
        """Top card without removing it; None when the shoe is empty."""
        if len(self.cards) > 0:
            return self.cards[0]

    def add_to_bottom(self, card):
        self.cards.append(card)

    def __str__(self):
        result = ""
        for card in self.cards:
            result += str(card) + "\n"
        return result

    def __len__(self):
        return len(self.cards)


# +
def _eval_hand(player_hand, target_low):
    """Blackjack value of a hand, promoting aces from 1 to 11 one at a time.

    Each promotion adds 10. A promotion is kept only while the total stays
    <= 21; the value is returned as soon as it lands in [target_low, 21].
    This single helper replaces the former copy-pasted dealer/player
    evaluators, which differed only in this threshold (17 vs 18).
    """
    num_ace = 0
    total = 0  # every ace counted as 1 for now
    for card in player_hand:
        if card.rank == "ace":
            num_ace += 1
            total += card.value[0]  # use 1 for the ace
        else:
            total += card.value
    for _unused in range(num_ace):
        promoted = total + 10  # count one more ace as 11 (1 already added)
        if promoted > 21:
            return total
        if target_low <= promoted <= 21:
            return promoted
        # Even with this ace as 11 we are below target_low; keep promoting.
        total = promoted
    return total


def dealer_eval(player_hand):
    """Dealer hand value under Hard-17 rules: an ace counts 11 when that
    lands the total in [17, 21]."""
    return _eval_hand(player_hand, 17)


def player_eval(player_hand):
    """Player hand value under our fixed policy: an ace counts 11 when that
    lands the total in [18, 21]."""
    return _eval_hand(player_hand, 18)


# +
def dealer_turn(dealer_hand, deck):
    """Play out the dealer's fixed policy: hit until the total is >= 17.

    Returns (dealer_value, dealer_hand, deck).
    """
    dealer_value = dealer_eval(dealer_hand)
    while dealer_value < 17:
        dealer_hand.append(deck.deal())  # hit
        dealer_value = dealer_eval(dealer_hand)
    return dealer_value, dealer_hand, deck


# +
import random
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding

INITIAL_BALANCE = 1000
NUM_DECKS = 6


class BlackjackEnv(gym.Env):
    """Single-player Blackjack against a Hard-17 dealer.

    Actions: 0 = hit, 1 = stand.
    Observation: (player_value - 2, dealer_upcard_value - 1) so that the
    decision-relevant player values 3..20 map onto Discrete(18) and upcard
    values 2..11 onto Discrete(10). Values >= 21 are resolved automatically
    and never require an agent decision.
    """

    metadata = {'render.modes': ['human']}

    def __init__(self):
        super(BlackjackEnv, self).__init__()
        self.bj_deck = Deck(NUM_DECKS)
        self.player_hand = []
        self.dealer_hand = []
        self.reward_options = {"lose": -100, "tie": 0, "win": 100}
        # hit = 0, stand = 1
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Tuple((spaces.Discrete(18), spaces.Discrete(10)))
        self.done = False

    def _take_action(self, action):
        if action == 0:  # hit
            self.player_hand.append(self.bj_deck.deal())
        # Re-evaluate after any change (no-op on stand).
        self.player_value = player_eval(self.player_hand)

    def step(self, action):
        """One decision. Ends the episode on stand or on a total >= 21."""
        self._take_action(action)
        self.done = action == 1 or self.player_value >= 21
        # Reward stays 0 while the hand is still live.
        rewards = 0
        if self.done:
            if self.player_value > 21:
                # Bust: automatic loss.
                rewards = self.reward_options["lose"]
            elif self.player_value == 21:
                # 21: automatic win (dealer never plays).
                rewards = self.reward_options["win"]
            else:
                # Player stood below 21 — play out the dealer.
                dealer_value, self.dealer_hand, self.bj_deck = dealer_turn(
                    self.dealer_hand, self.bj_deck)
                if dealer_value > 21:
                    rewards = self.reward_options["win"]
                elif dealer_value == 21:
                    # Player is < 21 here, so dealer 21 is always a loss.
                    rewards = self.reward_options["lose"]
                else:
                    if self.player_value > dealer_value:
                        rewards = self.reward_options["win"]
                    elif self.player_value < dealer_value:
                        rewards = self.reward_options["lose"]
                    else:
                        rewards = self.reward_options["tie"]
        self.balance += rewards
        # Shift 3..20 -> 1..18 and upcard 2..11 -> 1..10 (see class docstring).
        player_value_obs = self.player_value - 2
        upcard_value_obs = dealer_eval([self.dealer_upcard]) - 1
        obs = np.array([player_value_obs, upcard_value_obs])
        return obs, rewards, self.done, {}

    def reset(self):
        """Return all cards to the shoe, shuffle, and deal a fresh hand."""
        self.bj_deck.cards += self.player_hand + self.dealer_hand
        # Shuffle once per game only.
        self.bj_deck.shuffle()
        self.balance = INITIAL_BALANCE
        self.done = False
        self.player_hand = [self.bj_deck.deal(), self.bj_deck.deal()]
        self.dealer_hand = [self.bj_deck.deal(), self.bj_deck.deal()]
        self.dealer_upcard = self.dealer_hand[0]
        self.player_value = player_eval(self.player_hand)
        # Same shift as in step(): player 3..20 -> 1..18, upcard 2..11 -> 1..10.
        # (The original comment here said "2 through 20", contradicting step().)
        player_value_obs = self.player_value - 2
        upcard_value_obs = dealer_eval([self.dealer_upcard]) - 1
        obs = np.array([player_value_obs, upcard_value_obs])
        return obs

    def render(self, mode='human', close=False):
        """Print a human-readable snapshot of the current hand."""
        hand_list = []
        for card in self.player_hand:
            hand_list.append(card.rank)
        upcard_value = dealer_eval([self.dealer_upcard])
        print(f'Balance: {self.balance}')
        print(f'Player Hand: {hand_list}')
        print(f'Player Value: {self.player_value}')
        print(f'Dealer Upcard: {upcard_value}')
        print(f'Done: {self.done}')
        print()


# + Baseline: random policy average reward over 1000 episodes.
import random

env = BlackjackEnv()
total_rewards = 0
NUM_EPISODES = 1000
for _ in range(NUM_EPISODES):
    env.reset()
    while env.done == False:
        action = env.action_space.sample()
        new_state, reward, done, desc = env.step(action)
        state = new_state
        total_rewards += reward
avg_reward = total_rewards / NUM_EPISODES
print(avg_reward)


# +
def run_mc(env, num_episodes):
    """First-visit Monte Carlo control with an epsilon-soft policy.

    Q is indexed by 18 player values x 10 upcards (all states) by 2 actions.
    `prob` holds, per state, the probabilities of [hit, stand], initialised
    uniform. Epsilon decays multiplicatively toward epsilon_min so the agent
    explores early and exploits later.

    NOTE: the original notebook defined this function twice (with and
    without comments, identical logic); the duplicate has been removed.
    """
    Q = np.zeros([env.observation_space[0].n * env.observation_space[1].n,
                  env.action_space.n], dtype=np.float16)
    prob = np.zeros([env.observation_space[0].n * env.observation_space[1].n,
                     env.action_space.n], dtype=np.float16) + 0.5
    # Small learning rate: avoid large, abrupt policy swings.
    alpha = 0.001
    epsilon = 1
    # Decay compounds per episode, so it matters over thousands of episodes.
    decay = 0.9999
    epsilon_min = 0.9  # may have to be tweaked later.
    gamma = 0.8
    for _ in range(num_episodes):
        episode = play_game(env, Q, prob)
        epsilon = max(epsilon * decay, epsilon_min)
        Q = update_Q(env, episode, Q, alpha, gamma)
        prob = update_prob(env, episode, Q, prob, epsilon)
    return Q, prob


# +
def play_game(env, Q, prob):
    """Play one round, returning [(state, action, reward), ...].

    A round can hold several tuples (multiple hits). Dealt-blackjack starts
    (state[0] == 19, i.e. player value 21) are resolved but not logged —
    no decision was ever made.
    """
    episode = []
    state = env.reset()
    while env.done == False:
        if state[0] == 19:
            # Player was dealt Blackjack; any action resolves the round.
            next_state, reward, env.done, info = env.step(1)
        else:
            Q_state_index = get_Q_state_index(state)
            # Index 0 is hit, index 1 is stand.
            best_action = np.argmax(Q[Q_state_index])
            prob_of_best_action = get_prob_of_best_action(env, state, Q, prob)
            if random.uniform(0, 1) < prob_of_best_action:
                action_to_take = best_action
            else:
                action_to_take = 1 if best_action == 0 else 0
            next_state, reward, env.done, info = env.step(action_to_take)
            episode.append((state, action_to_take, reward))
            state = next_state
    return episode


# +
def update_Q(env, episode, Q, alpha, gamma):
    """First-visit MC update.

    Each state/action pair is credited with the gamma-discounted return of
    the REST of the episode (early hits enabled the later reward), not just
    its immediate reward. Every-visit MC would instead use only the
    immediate reward per pair.

    NOTE: the original notebook defined this function twice (identical
    logic); the duplicate has been removed.
    """
    step = 0
    for state, action, reward in episode:
        # Discounted return from this step onward (never from earlier steps).
        total_reward = 0
        gamma_exp = 0
        for curr_step in range(step, len(episode)):
            curr_reward = episode[curr_step][2]
            total_reward += (gamma ** gamma_exp) * curr_reward
            gamma_exp += 1
        Q_state_index = get_Q_state_index(state)
        curr_Q_value = Q[Q_state_index][action]
        Q[Q_state_index][action] = curr_Q_value + alpha * (total_reward - curr_Q_value)
        step += 1
    return Q


# +
def update_prob(env, episode, Q, prob, epsilon):
    """Nudge the action probabilities of every visited state toward the
    (possibly new) best action implied by the updated Q."""
    for state, action, reward in episode:
        prob = update_prob_of_best_action(env, state, Q, prob, epsilon)
    return prob


# +
def get_Q_state_index(state):
    """Flatten a (player_value_obs, upcard_obs) pair into a Q row index.

    Both components arrive already shifted by the env; subtract 1 more for
    0-based indexing. NOTE(review): this reads the module-level `env` for
    the upcard dimension instead of taking it as a parameter — it works only
    because a global `env` exists; confirm before reuse elsewhere.
    """
    initial_player_value = state[0] - 1
    dealer_upcard = state[1] - 1
    return (env.observation_space[1].n * (initial_player_value)) + (dealer_upcard)


# +
def get_prob_of_best_action(env, state, Q, prob):
    """Probability currently assigned to the greedy (argmax-Q) action."""
    Q_state_index = get_Q_state_index(state)
    best_action = np.argmax(Q[Q_state_index])
    return prob[Q_state_index][best_action]


def update_prob_of_best_action(env, state, Q, prob, epsilon):
    """Raise the greedy action's probability by (1 - epsilon), capped at 1.

    Epsilon decays over time, so exploration dominates early and the greedy
    action is increasingly favoured later. The complementary action gets the
    remaining probability mass.
    """
    Q_state_index = get_Q_state_index(state)
    best_action = np.argmax(Q[Q_state_index])
    # prob[Q_state_index][best_action] = prob[Q_state_index][best_action] + ((1 - epsilon) * (1 - prob[Q_state_index][best_action]))
    prob[Q_state_index][best_action] = min(1, prob[Q_state_index][best_action] + 1 - epsilon)
    other_action = 1 if best_action == 0 else 0
    prob[Q_state_index][other_action] = 1 - prob[Q_state_index][best_action]
    return prob


# + Train.
env = BlackjackEnv()
new_Q, new_prob = run_mc(env, 1000000)


# +
def best_policy(Q):
    """Greedy policy per state, as (0/1 list, "H"/"S" list, color list)."""
    best_policy_binary = []
    best_policy_string = []
    best_policy_colors = []
    for i in range(len(Q)):
        best_policy_binary.append(np.argmax(Q[i]))
        best_policy_string.append("H" if np.argmax(Q[i]) == 0 else "S")
        best_policy_colors.append("g" if np.argmax(Q[i]) == 0 else "r")
    return best_policy_binary, best_policy_string, best_policy_colors


# + Tabulate the learned policy: rows = player value 3..20, cols = upcard 2..11.
import pandas as pd

new_Q_binary, new_Q_string, new_Q_colors = best_policy(new_Q)
df = pd.DataFrame(columns=range(2, 12))
color_df = pd.DataFrame(columns=range(2, 12))
for s in range(3, 21):  # possible player values in the range 3 to 20
    start = env.observation_space[1].n * (s - 3)
    end = start + 10
    df.loc[s] = (new_Q_string[start:end])
    color_df.loc[s] = (new_Q_colors[start:end])

# + Render the policy table (green = hit, red = stand).
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
fig.patch.set_visible(False)  # hide axes
ax.set_axis_off()
ax.axis('tight')
ax.table(cellText=df.values, cellColours=color_df.values, cellLoc="center",
         rowLabels=df.index, colLabels=df.columns, loc='center')
fig.tight_layout()
plt.show()

# + Evaluate the greedy learned policy over 100k episodes.
import random

env = BlackjackEnv()
total_rewards = 0
NUM_EPISODES = 100000
for _ in range(NUM_EPISODES):
    state = env.reset()
    while env.done == False:
        if state[0] == 19:
            # Dealt Blackjack: resolve immediately, no decision to evaluate.
            next_state, reward, env.done, info = env.step(1)
            total_rewards += reward
        else:
            Q_index = get_Q_state_index(state)
            action = new_Q_binary[Q_index]
            new_state, reward, done, desc = env.step(action)
            state = new_state
            total_rewards += reward
avg_reward = total_rewards / NUM_EPISODES
print(avg_reward)
Blackjack_First_Visit_MC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Tutorial notebook: Python basics via the ctext corpus API
# (string formatting, functional programming, *args/**kwargs).

# import modules on the top of the notebooks
from ctext import *

# the zen of python
import this

# # Use other kernel inside a cell
#
# - change `%%bash` to `%%cmd` if you are in a Windows system

# + language="bash"
# echo Hi
# -

# # String

age = 11

# Four equivalent ways to format the same sentence:
print('I am', age, 'years old.')
print('I am ' + str(age) + ' years old.')
print('I am {} years old.'.format(age))

# python 3.6 up — f-strings; arbitrary expressions allowed inside the braces
print(f'I am {age} years old.')
print(f'I am {age * 2} years old.')

# # Functional Programming

# this would give the full text of dao-de-jing in a list,
# every element in the list represents one paragraph
dao_de_jing = gettextasparagrapharray("ctp:dao-de-jing")

# using functional programming: MAP
lengthDDJ = list(map(len, dao_de_jing))
longest_passage = lengthDDJ.index(max(lengthDDJ))
print("The longest paragraph is paragraph number " + str(longest_passage))

# using functional programming: list comprehension
lengthDDJ = [len(passage) for passage in dao_de_jing]
longest_passage = lengthDDJ.index(max(lengthDDJ))
print("The longest paragraph is paragraph number " + str(longest_passage))

# +
# it is equivalent to do ... (explicit index loop)
longest_passage = None
longest_length = 0
for passage_numer in range(0, len(dao_de_jing)):
    passage_text = dao_de_jing[passage_numer]
    if len(passage_text) > longest_length:
        longest_passage = passage_numer
        longest_length = len(passage_text)
print("The longest paragraph is paragraph number " + str(longest_passage))

# +
# It is also equivalent to ... (enumerate gives index and value together)
longest_passage = None
longest_length = 0
for passage_numer, passage_text in enumerate(dao_de_jing):
    if len(passage_text) > longest_length:
        longest_passage = passage_numer
        longest_length = len(passage_text)
print("The longest paragraph is paragraph number " + str(longest_passage))

# +
# It is also equivalent to ... (collect lengths first, then search)
passage_length = []
for passage_text in dao_de_jing:
    passage_length.append(len(passage_text))
longest_passage = passage_length.index(max(passage_length))
print("The longest paragraph is paragraph number " + str(longest_passage))
# -

# # args & kwargs

# +
# *args collects extra positional arguments; **kwargs collects keyword arguments.
def function(*args, **kwargs):
    print("unnamed args:", args)
    print("keyword args:", kwargs)

function(1, 2, key1='string1', key2='string2')

# + hide_input=false
# and they do not have to be called args or kwargs exactly
def function(*Alice, **Bob):
    print("unnamed args:", Alice)
    print("keyword args:", Bob)

function(1, 2, key1='string1', key2='string2')
# -

# # Resources:
#
# There are some more sophisticated tutorials about *string formatting*, *functional programming*, and *list comprehension* out there in the Internet. I just pick 3 tutorials below which are really helpful for me.
#
# The `1. 2. 3.` stands for hardness, from the easiest to the hardest. But this talk, **Loop Like A Native**, could give you an in-depth understanding about the loops in python even if the audience had not learned all types of the loops in python before. So, if you don't have time to go through all resources, I would suggest going to the **Loop Like A Native** directly.
#
# 1. The **4 Major Ways** to Do **String Formatting** in Python <font color="gray">(by <NAME>)</font>: https://dbader.org/blog/python-string-formatting
# 2. A practical introduction to **functional programming** <font color="gray">(by <NAME>)</font>: https://maryrosecook.com/blog/post/a-practical-introduction-to-functional-programming
# 3. **Loop Like A Native** <font color="gray">(by <NAME>)</font> : https://nedbatchelder.com/text/iter.html

# **NOTE:** Some of the above tutorials use python 2 syntax. Make sure you use the correct one in python 3 syntax.
#
# |python2|python3|
# |---- | ----|
# |`print "Hello"`|`print("Hello")` or `print ("Hello)`|
# |`reduce(function, iterable)`|`from funtools import reduce; reduce(function, iterable)`|
Python_Basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Summary
# This script is written to transform training images for transfer learning
# using the output bounding box calculated using YOLO

import os
import pathlib
from PIL import Image

# Changeable Param
train_dir = r'D:\Resources\Inat_Partial\Aves_Small_SS1_Train\CV_0'
output_dir = r'D:\Resources\Inat_Partial\Aves_Small_SS1_Train_YOLO\CV_0'
# CSV rows: subdir,image_name,top,left,bottom,right,score (one row per bbox).
yolo_output_csv = r'D:\Workspace\Jupyter\COMP5703\Resources\ss1_bbox.csv'

if not os.path.isdir(output_dir):
    pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)


def crop(image_path, coords, saved_location):
    """
    @param image_path: The path to the image to edit
    @param coords: A tuple of x/y coordinates (x1, y1, x2, y2)
    @param saved_location: Path to save the cropped image
    """
    try:
        image_obj = Image.open(image_path)
        cropped_image = image_obj.crop(coords)
        cropped_image.save(saved_location)
    except Exception as e:
        # Best-effort: a single unreadable image must not abort the run,
        # but silently passing (as before) hid every failure — log it.
        print(f'Failed to crop {image_path}: {e}')


def _numbered_name(image_name, index):
    """'abcd.jpg', 2 -> 'abcd(2).jpg'.

    Uses os.path.splitext so names containing extra dots are handled
    correctly (the old split('.') broke on them).
    """
    stem, ext = os.path.splitext(image_name)
    return f'{stem}({index}){ext}'


with open(yolo_output_csv, 'r', encoding='utf-8') as yoc:
    prev_index = 0          # how many bboxes of the CURRENT image were already written
    prev_image_name = ''
    for i, line in enumerate(yoc):
        if i % 1000 == 0:
            print('Parsing line', i)
        subdir, image_name, top, left, bottom, right, score = line.split(',')
        top, left, bottom, right = [int(coor) for coor in (top, left, bottom, right)]
        score = float(score)
        train_image_path = os.path.join(train_dir, subdir, image_name)
        output_subdir_path = os.path.join(output_dir, subdir)
        # In case one image has more than one bounding box, later crops get
        # suffixed names: abcd.jpg, abcd(1).jpg, abcd(2).jpg, ...
        # BUG FIX: the counter is now reset per image; previously it kept
        # growing across the whole file, producing suffixes like abcd(5731).
        # NOTE(review): assumes rows for one image are consecutive and that
        # equal names in different subdirs never appear back to back.
        if prev_image_name == image_name:
            prev_index += 1
            output_image_path = os.path.join(output_subdir_path,
                                             _numbered_name(image_name, prev_index))
        else:
            prev_index = 0
            output_image_path = os.path.join(output_subdir_path, image_name)
        if not os.path.isdir(output_subdir_path):
            os.mkdir(output_subdir_path)
        # PIL crop takes (left, upper, right, lower).
        crop(train_image_path, (left, top, right, bottom), output_image_path)
        prev_image_name = image_name
Util/yolo_image_crop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: mypm3env
#     language: python
#     name: mypm3env
# ---

# %config IPCompleter.greedy=True
# %config Completer.use_jedi = False

# +
from os.path import join

import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import theano
import theano.tensor as tt
import arviz
# -

# Load the height data and keep adults only.
d = pd.read_csv(join('_data', 'height.csv'))
d = d[d['age'] >= 18]

# ### Linear regression

# +
# Ordinary least squares fit of height on weight (with an intercept term).
endog = d['height']
exog = sm.add_constant(d['weight'])
ols_model = sm.OLS(endog, exog)
L = ols_model.fit()
# -

L.summary()

sns.regplot(data=d, x='weight', y='height', ci=None,
            scatter_kws={'color': 'lightblue'},
            line_kws={'color': 'r', 'linewidth': 3})
plt.show()

# ### Bayesian regression

# Shared theano variables let the model's data be swapped without rebuilding.
weight_shared = theano.shared(d['weight'].values)
height_shared = theano.shared(d['height'].values)

nIter = 1000
nChains = 4
nWarmup = 1000

with pm.Model() as model:
    # Weakly-informative priors on intercept, slope and noise scale.
    alpha = pm.Normal('alpha', 170, 100)
    beta = pm.Normal('beta', 0, 20)
    sigma = pm.HalfCauchy('sigma', beta=20)
    y_obs = pm.Normal('y', mu=alpha + beta * weight_shared, sigma=sigma,
                      observed=height_shared)
    trace = pm.sample(nIter, tune=nWarmup, chains=nChains, cores=4,
                      return_inferencedata=True, random_seed=1450154626)

# ### Diagnostics

# notice that the values for alpha and beta are the same as
# the ones estimated using `statsmodels`
with model:
    print(pm.summary(trace).round(2))

arviz.plot_trace(trace['posterior'])
plt.show()
lec08-regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Function

# 01 - no parameters, no return value

def add():
    a = 2
    b = 3
    print(a + b)

add()

# 02 - no parameters, returns a value

def add():
    a = 2
    b = 3
    return a + b

result = add()
print(result)

# 03 - with parameters, no return value

def add(x, y):
    print(x + y)

add(4, 5)

# 04 - with parameters and a return value

def add(x, y):
    return x + y

result = add(4, 5)
print(result)

# ## Parameters in functions

# 01 - no default parameters

def mul(x, y):
    print(x * y)

mul(3, 4)

# 02 - default parameters

def mul(x=2, y=4):
    print(x * y)

mul()
mul(5)
mul(3, 5)

# 03 - required plus default parameters (defaults go on the right side)

def mul(x, y, z=2):
    print(x * y * z)

mul(3, 4)
mul(3, 4, 5)

# 04 - arbitrary positional parameters: *args collects a tuple

def add(*a):
    print(a)
    print(type(a))

add()
add(2)
add(2, 3)
add(4, 5, "md")

# arbitrary keyword parameters: **kwargs collects a dict

def add(**a):
    print(a)
    print(type(a))

add()
add(mohan="4", reddy="6")
add(mohan="4", reddy="6", pandey="3")

# ## Keyword arguments

def fun(name, msg):
    print(name, msg)

fun("meraz", "hi")
fun("hi", "ram")
fun(msg="hi", name="ram")

# ## Lambda functions

fun = lambda x: 10 * x
res = fun(3)
print(res)

# Passing a function as an argument

def cube(a, sq):
    t = sq(a)
    return a * t

def square(b):
    return b * b

res = cube(10, square)
print(res)

# The same call site with an inline lambda instead of a named function

def cube(a, sq):
    t = sq(a)
    return a * t

res = cube(10, lambda x: x * x)
print(res)

# ## Global / local variable scope

a = 2
b = 3

def fun():
    c = 4
    d = a + b + c
    print(d)

fun()

# This cell only defines fun; it is not called here.

a = 2
b = 3

def fun():
    c = 4
    d = a + b + c
    print(d)

# ## Nested function

def outer():
    print("Outer function run")

    def inner():
        print("inner function run")

outer()

# # Function documentation

def add(a, b):
    """Add two int values and print the sum.

    Returns None; the result is printed rather than returned.
    """
    c = a + b
    print(c)

add(2, 5)
mod_04_function/mod_04_01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# System libs
import os
import time
# import math
import random
import argparse
from distutils.version import LooseVersion
# Numerical libs
import torch
import torch.nn as nn
# Our libs
from config import *
from dataset import TrainDataset
from models import models  # ModelBuilder, SegmentationModule
from models import *
from utils import AverageMeter, parse_devices, setup_logger
from lib.nn import UserScatteredDataParallel, user_scattered_collate, patch_replication_callback
from matplotlib import pyplot as plt

# +
from yacs.config import CfgNode as CN

# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.DIR = "ckpt/resnet50-upernet"

# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.root_dataset = "./data/"
_C.DATASET.list_train = "./data/training.odgt"
_C.DATASET.list_val = "./data/validation.odgt"
_C.DATASET.num_class = 2
# multiscale train/test, size of short edge (int or tuple)
_C.DATASET.imgSizes = (300, 375, 450, 525, 600)
# maximum input image size of long edge
_C.DATASET.imgMaxSize = 1000
# maximum downsampling rate of the network
_C.DATASET.padding_constant = 8
# downsampling rate of the segmentation label
_C.DATASET.segm_downsampling_rate = 8
# randomly horizontally flip images when train/test
_C.DATASET.random_flip = True

# -----------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# architecture of net_encoder
_C.MODEL.arch_encoder = "resnet50"
# architecture of net_decoder
_C.MODEL.arch_decoder = "upernet"
# weights to finetune net_encoder
_C.MODEL.weights_encoder = ""
# weights to finetune net_decoder
_C.MODEL.weights_decoder = ""
# number of feature channels between encoder and decoder
_C.MODEL.fc_dim = 2048

# -----------------------------------------------------------------------------
# Training
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.batch_size_per_gpu = 1
# epochs to train for
_C.TRAIN.num_epoch = 1
# epoch to start training. useful if continue from a checkpoint
_C.TRAIN.start_epoch = 0
# iterations of each epoch (irrelevant to batch size)
_C.TRAIN.epoch_iters = 22
_C.TRAIN.optim = "SGD"
_C.TRAIN.lr_encoder = 0.02
_C.TRAIN.lr_decoder = 0.02
# power in poly to drop LR
_C.TRAIN.lr_pow = 0.9
# momentum for sgd, beta1 for adam
_C.TRAIN.beta1 = 0.9
# weights regularizer
_C.TRAIN.weight_decay = 1e-4
# the weighting of deep supervision loss
_C.TRAIN.deep_sup_scale = 0.4
# fix bn params, only under finetuning
_C.TRAIN.fix_bn = False
# number of data loading workers
_C.TRAIN.workers = 1
# frequency to display
_C.TRAIN.disp_iter = 20
# manual seed
_C.TRAIN.seed = 304

# -----------------------------------------------------------------------------
# Validation
# -----------------------------------------------------------------------------
_C.VAL = CN()
# currently only supports 1
_C.VAL.batch_size = 1
# output visualization during validation
_C.VAL.visualize = False
# the checkpoint to evaluate on
_C.VAL.checkpoint = "epoch_20.pth"

# -----------------------------------------------------------------------------
# Testing
# -----------------------------------------------------------------------------
_C.TEST = CN()
# currently only supports 1
_C.TEST.batch_size = 1
# the checkpoint to test on
_C.TEST.checkpoint = "epoch_20.pth"
# folder to output visualization results
_C.TEST.result = "./"

cfg = _C

# Command-line options; parse_args(args=[]) keeps the defaults inside Jupyter.
parser = argparse.ArgumentParser(
    description="PyTorch Semantic Segmentation Training"
)
parser.add_argument(
    "--cfg",
    default="configuration/resnet50dilated-ppm_deepsup.yaml",
    metavar="FILE",
    help="path to config file",
    type=str,
)
parser.add_argument(
    "--gpus",
    default="0",
    help="gpus to use, e.g. 0-3 or 0,1,2,3"
)
parser.add_argument(
    "opts",
    help="Modify config options using the command-line",
    default=None,
    nargs=argparse.REMAINDER,
)
args = parser.parse_args(args=[])
# cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)

# Persist the effective config next to the checkpoints.
if not os.path.isdir(cfg.DIR):
    os.makedirs(cfg.DIR)
with open(os.path.join(cfg.DIR, 'config.yaml'), 'w') as f:
    f.write("{}".format(cfg))

# Start from checkpoint
if cfg.TRAIN.start_epoch > 0:
    cfg.MODEL.weights_encoder = os.path.join(
        cfg.DIR, 'encoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
    cfg.MODEL.weights_decoder = os.path.join(
        cfg.DIR, 'decoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
    # BUG FIX: corrected the misspelled assertion message ("exitst").
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"
# Parse gpu ids
gpus = parse_devices(args.gpus)
gpus = [x.replace('gpu', '') for x in gpus]
gpus = [int(x) for x in gpus]
num_gpus = len(gpus)
cfg.TRAIN.batch_size = num_gpus * cfg.TRAIN.batch_size_per_gpu
cfg.TRAIN.max_iters = cfg.TRAIN.epoch_iters * cfg.TRAIN.num_epoch
cfg.TRAIN.running_lr_encoder = cfg.TRAIN.lr_encoder
cfg.TRAIN.running_lr_decoder = cfg.TRAIN.lr_decoder

random.seed(cfg.TRAIN.seed)
torch.manual_seed(cfg.TRAIN.seed)
# -

# Dataset and Loader
dataset_train = TrainDataset(
    cfg.DATASET.root_dataset,
    cfg.DATASET.list_train,
    cfg.DATASET,
    batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)

loader_train = torch.utils.data.DataLoader(
    dataset_train,
    batch_size=len(gpus),  # we have modified data_parallel
    shuffle=False,  # we do not use this param
    collate_fn=user_scattered_collate,
    num_workers=cfg.TRAIN.workers,
    drop_last=True,
    pin_memory=True)

iterator_train = iter(loader_train)

# Pull one batch to inspect shapes interactively.
a = next(iterator_train)
a = a[0]

a['img_data'].shape

a['seg_label'].shape

# Build the encoder and run the sample batch through it; with
# return_feature_maps=True it yields one feature map per stage.
net_encoder = models.ModelBuilder.build_encoder(
    arch=cfg.MODEL.arch_encoder.lower(),
    fc_dim=cfg.MODEL.fc_dim,
    weights=cfg.MODEL.weights_encoder)

code = net_encoder(a['img_data'], return_feature_maps=True)
print(code[0].shape)
print(code[1].shape)
print(code[2].shape)
print(code[3].shape)

net_decoder = models.ModelBuilder.build_decoder(
    arch=cfg.MODEL.arch_decoder.lower(),
    fc_dim=cfg.MODEL.fc_dim,
    num_class=cfg.DATASET.num_class,
    weights=cfg.MODEL.weights_decoder)

decode = net_decoder(code)

decode.shape

# BUG FIX: the loss criterion was called one cell BEFORE it was defined,
# which raises a NameError when the notebook runs top-to-bottom. Define
# `crit` first, then evaluate it on the decoded batch.
crit = nn.CrossEntropyLoss(ignore_index=-1)

crit(decode, a['seg_label'])

segmentation_module = models.SegmentationModule(net_encoder, net_decoder, crit)
segmentation_module.train(not cfg.TRAIN.fix_bn)

pred = net_encoder(a['img_data'], return_feature_maps=True)

pred[3].shape

net_encoder
Projects/Semantic_Edge_Detection/PyTorch/_archive_testing/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Lower Back Pain Prediction with Decision Classifier Tree and Logistic Regression

# ![](https://media.giphy.com/media/2XflxzjlPftx97UOB2w/giphy.gif)

# ## Problem

# In this Kaggle Notebook we would try to answer the following question: **given a set of data in several health factors, is it possible to predict lower back pain?**

# ## Proposed Solution

# We would make use of **Normalization**, a **Decision Tree Classifier** to evaluate feature importance, and **Logistic Regression** to first build our prediction model. Later, we would find out the coefficients of the independent variables, check accuracy metrics and visualize true/false positives and true/false negatives.

# ## Importing Libraries and Packages

# ### Visualization
# * Matplotlib
# * Seaborn
#
# ### Data Processing
# * Numpy
# * Pandas
#
# ### Regression
# * Sklearn

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, explained_variance_score, confusion_matrix, accuracy_score, classification_report, plot_confusion_matrix
from math import sqrt
import matplotlib.pyplot as plt
# %matplotlib inline

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# ## Exploratory Analysis

# **First steps**
# 1. Importing the dataset
# 2. Finding out about its columns, shape, and dropping the last column
# 3. Visualizing the top 5 rows of the data

df = pd.read_csv('../input/lower-back-pain-symptoms-dataset/Dataset_spine.csv')
df = pd.DataFrame(df)
df.head()

df.columns

df.shape

# Drop the empty trailing column that comes from the CSV's trailing commas.
df.drop(['Unnamed: 13'], axis=1, inplace=True)

df.head()

# The default column names don't give us a lot of information, so we are renaming them with more descriptive headers.

# BUG FIX: 'cervical_tilt' and 'sacrum_angle' were missing a separating
# comma, so Python silently concatenated them into one string and the list
# had 12 entries instead of 13.
columnas = ['pelvic_incidence', 'pelvic tilt', 'lumbar_lordosis_angle', 'sacral_slope',
            'pelvic_radius', 'degree_spondylolisthesis', 'pelvic_slope', 'direct_tilt',
            'thoracic_slope', 'cervical_tilt', 'sacrum_angle', 'scoliosis_slope', 'result']

df = df.rename(columns={'Col1': 'pelvic_incidence',
                        'Col2': 'pelvic tilt',
                        'Col3': 'lumbar_lordosis_angle',
                        'Col4': 'sacral_slope',
                        'Col5': 'pelvic_radius',
                        'Col6': 'degree_spondylolisthesis',
                        'Col7': 'pelvic_slope',
                        'Col8': 'direct_tilt',
                        'Col9': 'thoracic_slope',
                        'Col10': 'cervical_tilt',
                        'Col11': 'sacrum_angle',
                        'Col12': 'scoliosis_slope',
                        'Class_att': 'result'})
df.head()

# Finding if there are null values in the dataset.

df.isna().sum()

# Changing the qualitative values to quantitative ones of our model.
# BUG FIX: the original used chained indexing (df['result'][mask] = 1),
# which triggers SettingWithCopyWarning and may silently write to a copy;
# .loc performs the assignment on the frame itself.

df.loc[df['result'] == 'Abnormal', 'result'] = 1
df.loc[df['result'] == 'Normal', 'result'] = 0
df.head()

# Now, we are creating a simple normalization function. You can see that the last column would stay the same because it will serve as our dependent variable.

def normalize(dataset):
    """Min-max scale every column of `dataset` to the range [0, 20].

    The 'result' column is copied back unchanged so the target keeps its
    0/1 labels. NOTE(review): the *20 factor is unusual (min-max usually
    maps to [0, 1]) — presumably deliberate; confirm.
    """
    DSNorm = ((dataset - dataset.min()) / (dataset.max() - dataset.min())) * 20
    DSNorm["result"] = dataset["result"]
    return DSNorm

# Our normalized dataset. Later, we are dividing our columns into independent and dependent variables.
# +
dfnorm = normalize(df)
dfnorm.head()
# -

# Independent variables (all features) and the dependent variable.
x = dfnorm.drop('result', axis=1).values
y = dfnorm['result']

y = y.astype(int)

# Printing the shape to make sure the X and Y values aligned correctly

print("X:", x.shape)
print("Y:", y.shape)

# BUG FIX: 'cervical_tilt' and 'sacrum_angle' lacked a separating comma and
# were silently concatenated into one string.
# NOTE(review): this list is not referenced anywhere below — candidate for
# removal.
featurest = ['pelvic_incidence', 'pelvic tilt', 'lumbar_lordosis_angle', 'sacral_slope',
             'pelvic_radius', 'degree_spondylolisthesis', 'pelvic_slope', 'direct_tilt',
             'thoracic_slope', 'cervical_tilt', 'sacrum_angle', 'scoliosis_slope']

# Creating our **Decision Tree Classifier** in order to extract feature importance.
# FIX: removed an unused `load_iris` import/call (its xt/yt were never used),
# and labels for the plot are now derived from the actual training columns
# instead of `columnas`, which also contained the target name.

from sklearn import tree

plt.figure(figsize=(40, 40))
dt = DecisionTreeClassifier()
dt = dt.fit(x, y)
feature_labels = list(dfnorm.drop('result', axis=1).columns)
tree.plot_tree(dt, feature_names=feature_labels, filled=True, rounded=True)

# Importance of each feature in the dataset

feat_name = []
importance = []
for i, column in enumerate(dfnorm.drop('result', axis=1)):
    print('The Feature Importance for {} is : {}'.format(column.capitalize(), dt.feature_importances_[i]))
    feat_name.append(column)
    importance.append(dt.feature_importances_[i])

# Visualizing our feature importance

fi_df = zip(feat_name, importance)
fi_df = pd.DataFrame(fi_df, columns=["Feature", "Score"])
fi_df

# Feature Importance in descending order according to score

fi_df = fi_df.sort_values('Score', ascending=False).reset_index()
columns_re = fi_df['Feature'][0:10]
fi_df

# ## Modeling

# Now, we are creating our variables X and Y, keeping only the 10 most
# important features, and getting them ready for **Logistic Regression**.

x = dfnorm[columns_re].values
y = dfnorm['result']
y = y.astype(int)
print("X:", x.shape)
print("Y:", y.shape)

# Dividing our data in training and testing sets.
# NOTE(review): no random_state is set, so the split (and all downstream
# metrics) change on every run — consider fixing a seed for reproducibility.

x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, test_size=0.2)
print("X_train:", x_train.shape)
print("Y_train:", y_train.shape, "\n")
print("Y_test:", y_test.shape)
print("X_test:", x_test.shape)

# Fitting our **Logistic Regression Model** with the liblinear solver for our small dataset
lr=LogisticRegression(solver = 'liblinear') lr.fit(x, y) # + papermill={"duration": 0.048677, "end_time": "2021-02-23T19:41:53.860896", "exception": false, "start_time": "2021-02-23T19:41:53.812219", "status": "completed"} tags=[] y_pred=lr.predict(x_train) predict_proba=lr.predict_proba(x_train) # + [markdown] papermill={"duration": 0.04079, "end_time": "2021-02-23T19:41:53.941838", "exception": false, "start_time": "2021-02-23T19:41:53.901048", "status": "completed"} tags=[] # **Coefficients** of the independent variables # + papermill={"duration": 0.048892, "end_time": "2021-02-23T19:41:54.030632", "exception": false, "start_time": "2021-02-23T19:41:53.981740", "status": "completed"} tags=[] lr.coef_ # + [markdown] papermill={"duration": 0.040287, "end_time": "2021-02-23T19:41:54.111676", "exception": false, "start_time": "2021-02-23T19:41:54.071389", "status": "completed"} tags=[] # ## Testing # + [markdown] papermill={"duration": 0.040681, "end_time": "2021-02-23T19:41:54.193377", "exception": false, "start_time": "2021-02-23T19:41:54.152696", "status": "completed"} tags=[] # The accuracy of our model # + papermill={"duration": 0.051883, "end_time": "2021-02-23T19:41:54.286007", "exception": false, "start_time": "2021-02-23T19:41:54.234124", "status": "completed"} tags=[] print("Training Dataset Accuracy:",lr.score(x_train,y_train)) print("Testing Dataset Accuracy:",lr.score(x_test,y_test)) # + [markdown] papermill={"duration": 0.040683, "end_time": "2021-02-23T19:41:54.367803", "exception": false, "start_time": "2021-02-23T19:41:54.327120", "status": "completed"} tags=[] # Our **Classification Report** # + papermill={"duration": 0.056174, "end_time": "2021-02-23T19:41:54.465243", "exception": false, "start_time": "2021-02-23T19:41:54.409069", "status": "completed"} tags=[] print(classification_report(y_train,y_pred)) # + papermill={"duration": 0.054337, "end_time": "2021-02-23T19:41:54.562946", "exception": false, "start_time": "2021-02-23T19:41:54.508609", 
"status": "completed"} tags=[] cm=confusion_matrix(y_train,y_pred,normalize='true') print(cm) # + [markdown] papermill={"duration": 0.041631, "end_time": "2021-02-23T19:41:54.647727", "exception": false, "start_time": "2021-02-23T19:41:54.606096", "status": "completed"} tags=[] # **Confussion Matrix** displaying True/False Positives and True/False Negatives # + papermill={"duration": 0.224151, "end_time": "2021-02-23T19:41:54.917018", "exception": false, "start_time": "2021-02-23T19:41:54.692867", "status": "completed"} tags=[] fig,ax=plt.subplots(figsize=(8,8)) plot_confusion_matrix(lr,x_train,y_train,normalize='true', cmap='Blues',ax=ax, display_labels=lr.classes_) # + papermill={"duration": 0.043924, "end_time": "2021-02-23T19:41:55.004856", "exception": false, "start_time": "2021-02-23T19:41:54.960932", "status": "completed"} tags=[]
Model/lower-back-pain-prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# Lambda School Data Science
#
# *Unit 2, Sprint 3, Module 3*
#
# ---
#
#
# # Applied Modeling, Module 3
#
# You will use your portfolio project dataset for all assignments this sprint.
#
# ## Assignment
#
# Complete these tasks for your project, and document your work.
#
# - [ ] Continue to iterate on your project: data cleaning, exploration, feature engineering, modeling.
# - [ ] Make at least 1 partial dependence plot to explain your model.
# - [ ] Share at least 1 visualization on Slack.
#
# (If you have not yet completed an initial model yet for your portfolio project, then do today's assignment using your Tanzania Waterpumps model.)
#
# ## Stretch Goals
# - [ ] Make multiple PDPs with 1 feature in isolation.
# - [ ] Make multiple PDPs with 2 features in interaction.
# - [ ] Use Plotly to make a 3D PDP.
# - [ ] Make PDPs with categorical feature(s). Use Ordinal Encoder, outside of a pipeline, to encode your data first. If there is a natural ordering, then take the time to encode it that way, instead of random integers. Then use the encoded data with pdpbox. Get readable category names on your plot, instead of integer category codes.
#
# ## Links
# - [<NAME>: Interpretable Machine Learning — Partial Dependence Plots](https://christophm.github.io/interpretable-ml-book/pdp.html) + [animated explanation](https://twitter.com/ChristophMolnar/status/1066398522608635904)
# - [Kaggle / <NAME>: Machine Learning Explainability — Partial Dependence Plots](https://www.kaggle.com/dansbecker/partial-plots)
# - [Plotly: 3D PDP example](https://plot.ly/scikit-learn/plot-partial-dependence/#partial-dependence-of-house-value-on-median-age-and-average-occupancy)

# + [markdown] heading_collapsed=true
# ### Provided

# + hidden=true
# %%capture
import sys

# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # !pip install category_encoders==2.*
    # !pip install eli5
    # !pip install pdpbox

# If you're working locally:
else:
    DATA_PATH = '../data/'

# + [markdown] heading_collapsed=true
# ## Import

# + hidden=true
import numpy as np
import pandas as pd
import os
import seaborn as sns

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
# import eli5
# from eli5.sklearn import PermutationImportance
from xgboost import XGBClassifier
from pdpbox.pdp import pdp_interact, pdp_interact_plot
from pdpbox.pdp import pdp_isolate, pdp_plot

# %matplotlib inline
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# -

# ## Functions

# +
def get_some(year):
    """Load a sample of the monthly Kickstarter CSV scrapes for `year`.

    Walks `Data\\{year}\\{month}\\*.csv`, reading only the first monthly
    folder and the first two files in it (deliberately a subset — see the
    inline comment), and returns all rows stacked into one DataFrame with
    a fresh integer index.
    """
    # Empty frame with the full scrape schema so every monthly append aligns.
    df = pd.DataFrame(
        columns=['backers_count', 'blurb', 'category', 'converted_pledged_amount',
                 'country', 'created_at', 'creator', 'currency', 'currency_symbol',
                 'currency_trailing_code', 'current_currency', 'deadline',
                 'disable_communication', 'fx_rate',
                 'goal', 'id', 'is_starrable', 'launched_at', 'name', 'photo',
                 'pledged', 'profile', 'slug', 'source_url', 'spotlight',
                 'staff_pick', 'state', 'state_changed_at', 'static_usd_rate',
                 'urls', 'usd_pledged', 'usd_type', 'location', 'friends',
                 'is_backing', 'is_starred', 'permissions']
    )
    folders = os.listdir(f'Data\\{year}')  # Get the monthly folders inside the year
    for folder in folders[:1]:
        files = os.listdir(f'Data\\{year}\\{folder}')  # Get the filenames inside monthly folders
        monthly = pd.concat(
            [pd.read_csv(
                f'Data\\{year}\\{folder}\\{file}') for file in files[:2]]  # Not getting a whole year for now
        )  # Reads in all the csv files in a given month
        df = df.append(monthly)
    df = df.reset_index().drop(columns='index')
    return df


def datetime_convert(df):
    """Convert the Unix-epoch timestamp columns to pandas datetimes (in place on `df`)."""
    # Time series data — the scrape stores these as seconds since the epoch.
    df['created_at'] = pd.to_datetime(df['created_at'], unit='s')
    df['deadline'] = pd.to_datetime(df['deadline'], unit='s')
    df['launched_at'] = pd.to_datetime(df['launched_at'], unit='s')
    df['state_changed_at'] = pd.to_datetime(df['state_changed_at'], unit='s')
    # df['created_at'] = pd.to_datetime(df['created_at'], format='%m%d%Y')
    # df['deadline'] = pd.to_datetime(df['deadline'], format='%m%d%Y')
    # df['launched_at'] = pd.to_datetime(df['launched_at'], format='%m%d%Y')
    # df['state_changed_at'] = pd.to_datetime(df['state_changed_at'], format='%m%d%Y')
    return df


def drop_dupes(df):
    """Drop rows whose Kickstarter `id` has already been seen (keeps the first occurrence)."""
    df = df[~df.duplicated('id')]
    return df
# -

# ## Fetch and wrangle

# ##### Fetch

# cd C:\Users\Hakuj\Documents\DataSets\Kickstarter

df = get_some(2018)

# ##### Wrangle

df = drop_dupes(df)

# ## PDP

# #### Model

model = make_pipeline(
    SimpleImputer(strategy='most_frequent'),
    ce.OrdinalEncoder(),
    XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# BUG FIX: 'state' was listed twice in the drop list; once is enough.
X = df.drop(columns=['state', 'pledged', 'usd_pledged'])
y = df['state']

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# BUG FIX: removed two broken lines that were here:
#     X_train_transformed = transformer.fit(X_train)
#     X_test_transformed = transformer.fit(X_test)
# `transformer` was never defined (immediate NameError), `.fit()` returns the
# estimator rather than transformed data, and the results were never used —
# the pipeline below already handles imputation and encoding.

model.fit(X_train, y_train)

X_test.columns

# #### Plot

isolated = pdp_isolate(
    model=model,
    dataset=X_test,
    model_features=X_test.columns,
    feature='spotlight'
)

pdp_plot(isolated, feature_name='spotlight');

pdp_plot(isolated, feature_name='spotlight', plot_lines=True);
module3/assignment_applied_modeling_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
from base.closed_form import ClosedFormRegression
from data import data
import matplotlib
import numpy as np

# change this to your own backend
matplotlib.interactive(False)
import matplotlib.pyplot as plt
% matplotlib inline


def plot_data_point(X_train, y_train, X_test, y_test):
    """Scatter-plot the train ('o') and test ('x') points on shared figure 2."""
    plt.figure(2,figsize=(8,6), dpi= 80, facecolor='w', edgecolor='k')
    plt.ylim([-2,2])
    plt.xlabel("x")
    plt.ylabel("y")
    plt.plot(X_train, y_train, 'o', ms=3, label='Train data')
    plt.plot(X_test, y_test, 'x', ms=3, label='Test data')
    plt.title('Data point/regression model')


def plot_regression_model(model, fmt):
    """Overlay the fitted model's curve (evaluated on [-2, 2)) on figure 2.

    `fmt` is a matplotlib format string (e.g. 'r') selecting the line colour.
    """
    plt.figure(2)
    plt.ylim([-2,2])
    # Dense grid so the polynomial curve looks smooth.
    X = np.arange(-2, 2, 0.01)
    y = model.predict(X)
    plt.plot(X, y, fmt, label='Model for ' + model.description())
    plt.legend(fancybox=True, loc='lower right', framealpha=0.9, prop={'size': 10})


def plot_mse(model, mse,fig, fmt, color):
    """Print the (train, test) MSE pair for `model`.

    NOTE(review): `fig`, `fmt` and `color` are accepted but unused here —
    presumably leftovers from an earlier plotting version.
    """
    print "Train MSE for model", model.description(), "is", mse[0]
    print "Test MSE for model", model.description(), "is", mse[1]


if __name__ == '__main__':
    # Load the synthetic dataset; 20% of points go to training, normalized.
    X_train, y_train, X_test, y_test = data.load(train_coefficient=0.2, normalize=True)
    plt.figure()
    # Three unregularized (lambda_var=0) closed-form polynomial fits of
    # increasing order, to compare under/over-fitting.
    model3 = ClosedFormRegression(order=3, lambda_var=0)
    model5 = ClosedFormRegression(order=5, lambda_var=0)
    model7 = ClosedFormRegression(order=7, lambda_var=0)
    plot_data_point(X_train, y_train, X_test, y_test)
    # fit model
    model3.fit(X_train, y_train)
    model5.fit(X_train, y_train)
    model7.fit(X_train, y_train)
    # calculate mse
    mse3 = model3.mse(X_train, y_train), model3.mse(X_test, y_test)
    mse5 = model5.mse(X_train, y_train), model5.mse(X_test, y_test)
    mse7 = model7.mse(X_train, y_train), model7.mse(X_test, y_test)
    # Check if a plot is to be made for the entered alpha
    plot_regression_model(model3, 'r')
    plot_regression_model(model5, 'y')
    plot_regression_model(model7, 'b')
    # plot train mse,
    plot_mse(model3, mse3,1, '-', 'r')
    plot_mse(model5, mse5,2, '-', 'y')
    plot_mse(model7, mse7,3, '-', 'b')
    # show plots
    plt.show()
# -
hw1/main_closed_form.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
from sklearn.tree import DecisionTreeClassifier, plot_tree
import pandas as pd

# Load the micro 20-questions table: column 0 holds the class label,
# every remaining column is a feature.
data = pd.read_csv('../../data/20questions/micro.csv')
display(data)

label_column = data.columns[0]
feature_names = data.columns[1:]

target = data[label_column]
features = data[feature_names]

# Fit an unconstrained decision tree on the whole table and render it.
model = DecisionTreeClassifier().fit(features, target)
plot_tree(
    model,
    feature_names=feature_names,
    class_names=model.classes_,
    impurity=False,
    node_ids=False,
    label='none',
    filled=True,
    fontsize=8,
)
src/20questions/decisiontree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import random

# Read the space-separated word list to encipher.
# (with-statement guarantees the handle is closed even on error.)
with open(r'/Users/liudongying/Desktop/pyth.txt') as fh:
    text = fh.read()
idioms = text.split(' ')
print(idioms)

for number in range(len(idioms)):
    n = len(idioms[number])
    # Build an 18-digit key by repeating a random 9-digit number; the i-th
    # digit (0-9) is the forward shift applied to the i-th letter.
    code = random.randint(100000000,999999999)
    new_code = str(code) + str(code)
    print(new_code)
    new_idioms = []
    for i in range(len(idioms[number])):
        # Shifted code point: ord(letter in a-z) + digit value (ord - 48).
        word = ord((idioms[number])[i]) + ord(new_code[i])-48
        # BUG FIX: the original test `if 97 < word < 122 ... elif word > 122`
        # silently dropped any character whose shifted code point landed
        # exactly on ord('a') == 97 or ord('z') == 122, shortening the word.
        # `word` is always >= 97 here, so only wrap-around past 'z' is needed.
        if word <= 122:
            new_idioms.append(chr(word))
        else:
            new_idioms.append(chr(word-26))
    idioms[number] = new_idioms
    # Pad every word up to 10 letters with random lowercase filler.
    if n < 10:
        for x in range(n,10):
            idioms[number].append(random.choice('abcdefghijklmnopqrstuvwxyz'))

print(idioms)

with open(r'/Users/liudongying/Desktop/pyth_new.txt', 'w') as fh:
    fh.writelines(str(idioms))
chapter2/homework/computer/5-10/201611680485.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # K-Nearest Neighbors (KNN)

# K nearest neighbour

# ## 0. Introduction

# ### How to classify movies

# As everyone knows, movies can be classified by genre — but how is a genre itself
# defined? Who decides which genre a given movie belongs to? In other words, what
# common features do movies of the same genre share? These questions must be
# considered when classifying movies. No filmmaker would claim their movie is just
# like some earlier one, yet we do know that each movie may well resemble others
# of the same genre in style. So what shared traits make action movies similar to
# one another yet clearly different from romance movies? Action movies may contain
# kiss scenes, and romance movies may contain fight scenes, so we cannot judge a
# movie's type simply by whether fights or kisses occur. But romance movies have
# more kiss scenes and action movies have more fight scenes, so the number of
# times such scenes appear in a movie can be used to classify it.
#
# This chapter introduces our first machine-learning algorithm: K-nearest
# neighbors. It is very effective and easy to master.

# ## 1. How the K-nearest-neighbors algorithm works

# Simply put, KNN classifies by measuring the distance between feature values.
# - Pros: high accuracy, insensitive to outliers, no assumptions about the input data.
# - Cons: high time complexity, high space complexity.
# - Applicable data: numeric and nominal values.

# ### How it works
# There is a training sample set in which every sample carries a label, i.e. we
# know the mapping between each sample and its class. When unlabeled new data is
# input, each feature of the new data is compared with the corresponding features
# of the samples in the set, and the algorithm extracts the class labels of the
# most similar (nearest) samples. In general only the top K most similar samples
# are used — that is where the K in "K-nearest neighbors" comes from; *usually K
# is an integer no larger than 20. Finally, the most frequent class among the K
# most similar samples is chosen as the class of the new data.*

# Back to the movie-classification example: use KNN to separate romance from
# action movies. People have counted fight scenes and kiss scenes for many
# movies; the figure below shows the counts for 6 movies. Given a movie you have
# not seen, how do you decide whether it is a romance or an action movie? KNN
# can answer this.
#
# ![1.PNG](attachment:1.PNG)

# First we need the number of fight and kiss scenes in the unknown movie. The
# question mark in the figure above marks the unknown movie; the exact numbers
# are in the table below.
#
# ![2.PNG](attachment:2.PNG)

# Even without knowing the unknown movie's genre, we can compute it. First,
# compute the distance from the unknown movie to every movie in the sample set,
# as shown below.
#
# ![3.PNG](attachment:3.PNG)

# With all distances in hand, sort them in increasing order and take the K
# closest movies. Assume k=3: the three nearest are California Man, He's Not
# Really into Dudes, and Beautiful Woman. KNN decides the unknown movie's genre
# from those three — they are all romance movies, so we classify the unknown
# movie as a romance movie.

# ### Euclidean Distance

# Euclidean distance is the most common distance metric; it measures the absolute
# distance between points in a multidimensional space. The formula:
#
# ![4.png](attachment:4.png)

# +
import numpy as np
import pandas as pd

from sklearn.neighbors import KNeighborsClassifier
# -

movie = pd.read_excel('../data/movies.xlsx',sheet_name=1).head()
movie

# Features: fight-scene and kiss-scene counts.
X = movie[['武打镜头','接吻镜头']]
X

# Target: the genre label.
y = movie['分类情况']
y

# +
knn = KNeighborsClassifier(n_neighbors=5)

# This call trains on the data.
knn.fit(X,y)

# +
# Predict new movies:
#   Mission: Impossible 6  — 100 fight scenes, 3 kiss scenes
#   Wolf Warrior 2         — 200 fight scenes, 1 kiss scene
#   Under the Hawthorn Tree — 0 fight scenes, 10 kiss scenes
X_test = pd.DataFrame({'武打镜头':[100,200,0],'接吻镜头':[3,1,10]})
X_test
# -

# BUG FIX: this call appeared twice in a row; the first result was discarded,
# so the duplicate was removed.
knn.predict(X_test)

movie

# Manual check: Euclidean distance from every training movie to (100, 3).
s = ((movie['武打镜头'] - 100)**2 + (movie['接吻镜头'] - 3)**2)**0.5
index = s.sort_values().index
index

# Labels of the 5 nearest neighbours — the majority vote is the prediction.
movie['分类情况'][index[:5]]

# +
# Mission: Impossible 6 — 60% probability action movie, 40% romance movie.
knn.predict_proba(X_test)
# -

# ## 2. Using the K-nearest-neighbors algorithm from scikit-learn

# - Classification: from sklearn.neighbors import KNeighborsClassifier
#
# - Regression: from sklearn.neighbors import KNeighborsRegressor

# ### 0) A minimal example

# Height, weight, and shoe size mapped to gender

# ### 1) For classification

# Import the KNN algorithm and the blue-butterfly dataset
#
# Get the training samples
# Plot the figure
# Define the KNN classifier
# Step 1: train on the data
# Step 2: predict the data to predict — created by hand, the background points of the figure above
#
# Generate the prediction data
# Predict on that data
# Display the data

# ### 2) For regression

# Regression is used to predict trends
# Import packages
# Generate sample data
# Generate the expected results for the test data
# Step 1: build the model and train on the data
# Step 2: use the model to predict the data
# Plot to display the data

# ### Exercise

# Human activity recognition
# Walking, walking upstairs, walking downstairs, sitting, standing and lying down
# ![](stand-sit-run.jpg)
# Data collection: each person wore a smartphone at the waist while performing six
# activities (walking, walking upstairs, walking downstairs, sitting, standing,
# lying down). The embedded accelerometer and gyroscope captured 3-axis linear
# acceleration and 3-axis angular velocity at a constant rate of 50 Hz.
# Import the data
# Get the data
# Plot

# ## 3. Homework

# #### 1. Predict whether annual income exceeds $50K

# Read the adult.txt file; the last column is annual income. Train a model with
# the KNN algorithm, then use the model to predict whether a person earns more than 50K.
# Use age, education level, occupation and weekly working hours as the learning features
# Use salary as the corresponding target
# Data conversion: convert String data to int
# [Key point] the map method performs the conversion
# Slicing: training data and prediction data
# Build the algorithm
# Step 1: train on the data
# Step 2: predict the data
# Save the trained model
# from sklearn.externals import joblib

# #### 2. Wheat variety prediction

# Read the seeds.tsv file; the last column is the wheat variety, the other columns are wheat features

# #### 3. Improving the matching results of a dating site

# Read the datingTestSet.txt file; the last column is the liking level. Model:
# predict the liking level from the information in the preceding columns
KNN/.ipynb_checkpoints/knn-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Create training data set # # Toolkit to transform the [Husser 2013](http://adsabs.harvard.edu/abs/2013A%26A...553A...6H) grid of stellar atmosphere models to the form useful as training data for the neural network limb darkening approach. # ### Introduction # # The grid of atmosphere models was accessed from [http://phoenix.astro.physik.uni-goettingen.de/](http://phoenix.astro.physik.uni-goettingen.de/) on 28th March 2017. The required data are stored in the `spec_int` files, which for each model atmosphere contain a spectral intensity value for a range of (~80) angles of $\mu$ for every 0.1 nm in the range 50 - 2600 nm. # # The form of training data required for the neural network is for a particular bandpass, for example *Kepler*, the training input values are the stellar atmospheric parameters ($T_{eff}$, $\log g$ and $[Fe/H]$) as well as cosine of the limb angle ($\mu$), with the output as the value of the intensity. # # To produce the intensity values for each model atmosphere and limb angle, the spectral intensity information from the model atmosphere must be convolved with the spectral response curve of the filter and that is then integrated over wavelength. # # The key steps required are: # - Build a function for one stellar atmosphere model # - to convolve with each bandpass # - integrate over wavelength # - for each given mu angle # - correct for radius definition # - Loop through all of the atmosphere models # - Export to file # # This notebook goes through these steps to produce the entire grid of training data for a range of wavelengths and few other corrections required along the way. # # ### Getting started # # Firstly import the packages required... 
# + % pylab inline import numpy as np import glob from scipy.interpolate import interp1d from scipy.integrate import simps from astropy.io import fits from astropy.utils.console import ProgressBar import pandas as pd import matplotlib.pylab as plt import seaborn as sb # - # ### The model atmospheres # # Below the function to read in the required information from the model atmosphere files def read_PHOENIX(chosen_path): with fits.open(chosen_path) as f: I = (f[0].data)/100. # matrix of spectral intensity values, scaled down for easier calculations mu = f['MU'].data # mu values spectral intensity is calculated for in this model CRVAL1 = f[0].header['CRVAL1'] # wavelength start value (in Angstrom) CDELT1 = f[0].header['CDELT1'] # wavelength step size (in Angstrom) teff = f[0].header['PHXTEFF'] # effective temperature of model (K) logg = f[0].header['PHXLOGG'] # log g of model feh = f[0].header['PHXM_H'] # metallicity of model wavelengths = (np.arange(I.shape[1]) * CDELT1 + CRVAL1)/10. # convert to nm to match response functions return wavelengths, I, mu, teff, logg, feh # We can now locate all the atmosphere models, with the list of paths stored in: `model_list` # # **Note:** this will only work if the models are stored in the same location on the computer this is run on model_list = [] model_list.extend(glob.glob('../phoenix2011/Z-0.0/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z-0.5/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z-1.0/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z-1.5/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z-2.0/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z-3.0/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z-4.0/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z+0.5/*.fits')) model_list.extend(glob.glob('../phoenix2011/Z+1.0/*.fits')) print("Number of models found: ",len(model_list)) # ### The bandpasses # # The locations of each of the response functions are defined in the following 
dictionary, and are then loaded into memory in the dictionary, `filters` # # More information about each of the bandpasses included can be found in the [README](./response_functions/0_README.md) file in the directory with the response functions. transmission = {'Kp' : './response_functions/kepler_response.txt', 'S1' : './response_functions/spitzer_1.txt', 'S2' : './response_functions/spitzer_2.txt', 'S3' : './response_functions/spitzer_3.txt', 'S4' : './response_functions/spitzer_4.txt', 'u' : './response_functions/Stroemgren_u.txt', 'v' : './response_functions/Stroemgren_v.txt', 'b' : './response_functions/Stroemgren_b.txt', 'y' : './response_functions/Stroemgren_y.txt', 'U' : './response_functions/Bessel_U-1.txt', 'V' : './response_functions/Bessel_V-1.txt', 'B' : './response_functions/Bessel_B-1.txt', 'R' : './response_functions/Bessel_R-1.txt', 'I' : './response_functions/Bessel_I-1.txt', 'J' : './response_functions/2MASS_Jband.txt', 'H' : './response_functions/2MASS_Hband.txt', 'K' : './response_functions/2MASS_Kband.txt', 'u_' : './response_functions/SDSS_u.txt', 'g_' : './response_functions/SDSS_g.txt', 'r_' : './response_functions/SDSS_r.txt', 'i_' : './response_functions/SDSS_i.txt', 'z_' : './response_functions/SDSS_z.txt', 'vT' : './response_functions/v_tycho.txt', 'bT' : './response_functions/b_tycho.txt', 'Cp' : './response_functions/cheops.txt' } filters = {} for filt in transmission: filters[filt] = np.loadtxt(transmission[filt]) # Since the calculated wavelengths in each model atmosphere output file are the same, we can save some time by interpolating the bandpasses for each filter to match the PHOENIX model output for the convolution and integration step. The interpolated bandpasses are then stored in the dictionary, `filt_int`. 
# + init_wavelengths, _I, _mu, _teff, _logg, _feh = read_PHOENIX(model_list[409]) filt_int = {} for filt in filters: filt_int[filt] = np.interp(init_wavelengths,filters[filt][:,0],filters[filt][:,1]) # - # ### Example spectrum # # To demonstrate the process involved in producing the grid, here are a couple of example plots. # # Below is a plot of the model atmosphere output at 2 different limb angles (shown in red), plotted with the *Kepler* spectral response (shown in orange), and the result of the convolution of the two for each limb angle (shown in blue). # + _filt_spec = (_I * filt_int['Kp']).T print('Example model:',model_list[409]) print('Effective temperature:',_teff,'K, log g:',_logg,', [Fe/H]:',_feh) j = 40 cp = sb.color_palette('deep') with sb.axes_style('white'): fig,axs = plt.subplots(1,2,figsize=(12,5)) axs2 = axs[0].twinx() axs[0].plot(init_wavelengths,_filt_spec[:,-1]/np.max(_filt_spec[:,-1]),lw=0.5,alpha=0.7,label=r'$F_{\lambda} S_{\lambda}$') axs2.plot(init_wavelengths,_I[-1,:],lw=0.5,c=cp[2],alpha=0.7,label=r'$F_{\lambda}$') axs[0].plot(init_wavelengths,filt_int['Kp']/np.max(filt_int['Kp']),label=r'$S_{\lambda}$') axs[0].legend(loc=2) axs2.legend(loc=1) axs4 = axs[1].twinx() axs[1].plot(init_wavelengths,_filt_spec[:,j]/np.max(_filt_spec[:,j]),lw=0.5,alpha=0.7,label=r'$F_{\lambda} S_{\lambda}$') axs4.plot(init_wavelengths,_I[j,:],lw=0.5,c=cp[2],alpha=0.7,label=r'$F_{\lambda}$') axs[1].plot(init_wavelengths,filt_int['Kp']/np.max(filt_int['Kp']),label=r'$S_{\lambda}$') axs[1].legend(loc=2) axs4.legend(loc=1) plt.setp(axs[0],title=r'$\mu$ = 1') plt.setp(axs[1],title=r'$\mu$ = '+str(_mu[j])) plt.setp(axs[0],xlim=[330,1000]) plt.setp(axs[1],xlim=[330,1000]) plt.setp(axs[0],xlabel=r'wavelength, nm',ylabel=r'scaled intensity / scaled transmission') plt.setp(axs2,ylabel=r'spectral flux') plt.setp(axs2,ylim=[0,6e12]) plt.setp(axs[1],xlabel=r'wavelength, nm') plt.setp(axs4,ylabel=r'spectral flux') plt.setp(axs4,ylim=[0,1e12]) # - # For that same model 
atmosphere, here is the limb profile for the *Kepler* bandpass as a result of intergrating the above convolutions with respect to wavelength for each limb angle. # + _flux = [] for j in range(_mu.shape[0]): _flux.append(simps(_filt_spec[:,j]*init_wavelengths,init_wavelengths)) _flux = _flux/(_flux[-1]) _flux = np.array(_flux) with sb.axes_style('darkgrid'): fig,axs = plt.subplots(1,1,figsize=(5,4)) axs.plot(_mu,_flux,marker='.') plt.setp(axs,xlabel=r'$\mu$') plt.setp(axs,ylabel=r'$I_{\lambda}(\mu)$') plt.setp(axs,xlim=[0,1],ylim=[0,1]) # - # ### Stellar radius definition # # As can be seen from the plot above, the stellar radius used to define the values of $\mu$ is not the same as how we would define the edge of a star for an exoplanet transit. In order to correct for this inconsistency, we need to redefine the stellar radius as some value inside where it is currently defined and then recalculate the values for $\mu$. # # Following [Espinoza et al 2015](http://adsabs.harvard.edu/abs/2015MNRAS.450.1879E) we redefine the stellar radius as the radius where the maximum gradient in intensity is found, which is defined in the function `new_get_rmax`. # # An example of this correction for the same model atmosphere as before is shown below. 
def new_get_rmax(mu,I0):
    """Redefine the stellar radius at the steepest intensity drop-off.

    Following Espinoza et al. (2015), the edge of the star is taken to be
    the radius of maximum |dI/dr|.  Returns the rescaled radii, the
    corresponding new mu values, the intensities inside the new radius,
    and the (r, mu) of the chosen cut point.
    """
    # convert mu to r
    r = np.sqrt(1.-(mu**2.))
    # find the maximum derivative point
    i = np.argmax(abs(np.diff(I0)/np.diff(r)))
    # make radius correction to values inside new radius
    r_new = r[i:]/r[i]
    mu_new = np.sqrt(1-r_new**2)
    ip_new = I0[i:]
    return r_new, mu_new, ip_new, r[i], mu[i]

# +
_r = np.sqrt(1.-(_mu**2.))

# Also calculate limb profile for the 2MASS K band
_filt_spec2 = (_I * filt_int['K']).T
_flux2 = []
for j in range(_mu.shape[0]):
    # Integrate (intensity x response x wavelength) over wavelength per mu.
    _flux2.append(simps(_filt_spec2[:,j]*init_wavelengths,init_wavelengths))
_flux2 = _flux2/(_flux2[-1])
_flux2 = np.array(_flux2)

# Apply corrections
new_r, new_mu, new_I0, _, _ = new_get_rmax(_mu,_flux) # Kepler band
new_r2, new_mu2, new_I02, _, _ = new_get_rmax(_mu,_flux2) # 2MASS K band band

# Plotting correction
gp = sb.color_palette("BuGn_r")
bp = sb.color_palette("GnBu_d")
with sb.axes_style('darkgrid'):
    # Top row: original profiles; bottom row: radius-corrected profiles.
    fig,axs = plt.subplots(2,3,figsize=(15,6),sharey='row',sharex='col')
    axs[0,0].plot(_mu,_flux,label=r'Kepler band',c=gp[1])
    axs[0,0].plot(_mu,_flux2,label=r'2MASS K band',c=bp[1])
    plt.setp(axs[0,0],xlim=[0,1])
    plt.setp(axs[0,0],ylim=[0,1])
    plt.setp(axs[0,0],ylabel=r'Scaled intensity')
    axs[0,1].plot(_r,_flux,label=r'Kepler band',c=gp[1])
    axs[0,1].plot(_r,_flux2,label=r'2MASS K band',c=bp[1])
    plt.setp(axs[0,1],xlim=[0,1])
    plt.setp(axs[0,1],ylim=[0,1])
    axs[0,1].legend()
    axs[1,0].plot(new_mu,new_I0,label=r'adjusted Kepler band',c=gp[2])
    axs[1,0].plot(new_mu2,new_I02,label=r'adjusted 2MASS K band',c=bp[2])
    plt.setp(axs[1,0],xlim=[0,1])
    plt.setp(axs[1,0],ylim=[0,1])
    plt.setp(axs[1,0],xlabel=r'$\mu$')
    plt.setp(axs[1,0],ylabel=r'Scaled intensity')
    axs[1,1].plot(new_r,new_I0,label=r'adjusted Kepler band',c=gp[2])
    axs[1,1].plot(new_r2,new_I02,label=r'adjusted 2MASS K band',c=bp[2])
    plt.setp(axs[1,1],xlim=[0,1])
    plt.setp(axs[1,1],ylim=[0,1])
    plt.setp(axs[1,1],xlabel=r'$R / R_\star$')
    axs[1,1].legend()
    axs[0,2].plot(_r,_flux,label=r'Kepler band',c=gp[1])
    axs[0,2].plot(_r,_flux2,label=r'2MASS K band',c=bp[1])
plt.setp(axs[0,2],xlim=[0.995,1]) plt.setp(axs[0,2],ylim=[0,1]) axs[1,2].plot(new_r,new_I0,label=r'adjusted Kepler band',c=gp[2]) axs[1,2].plot(new_r2,new_I02,label=r'adjusted 2MASS K band',c=bp[2]) plt.setp(axs[1,2],xlim=[0.995,1]) plt.setp(axs[1,2],ylim=[0,1]) plt.setp(axs[1,2],xlabel=r'$R / R_\star$') plt.subplots_adjust(hspace = 0.1, wspace = 0.15) # - # ### Generating the grid # # Now we can produce the grid itself, and save the table to use later. # + columns = ['Teff','logg','Z','Filt','mu','intensity','mu_fac','r_fac'] grid1 = [] grid2 = [] with ProgressBar(len(model_list), ipython_widget=True) as bar: for item in model_list: wavelengths, I, mu, teff, logg, feh = read_PHOENIX(item) for filt in filters: filt_spec = (I * filt_int[filt]).T flux = [] for j in range(mu.shape[0]): flux.append(simps(filt_spec[:,j]*wavelengths,wavelengths)) flux = flux/(flux[-1]) flux = np.array(flux) new_r,new_mu,new_I0,r_fac,mu_fac = new_get_rmax(mu,flux) even_mus = np.linspace(new_mu.min(),1,200) interp_I = interp1d(new_mu,new_I0,kind='quadratic',assume_sorted=True)(even_mus) for q in range(mu.shape[0]): grid1.append([teff,logg,feh,filt,mu[q],flux[q],mu_fac,r_fac]) for s in range(even_mus.shape[0]): grid2.append([teff,logg,feh,filt,even_mus[s],interp_I[s],mu_fac,r_fac]) bar.update() # + df = pd.DataFrame(data=grid1,columns=columns) df2 = pd.DataFrame(data=grid2,columns=columns) # for same distribution of mu as defined in model df.to_csv('phoenix_intensity_table.csv') # for 200 evenly-spaced mu values (intensities interpolated) df2.to_csv('phoenix_intensity_table_resampled.csv') # - # Files are not included in the repository, as they are 1.1 GB and 3.5 GB respectively
building_input_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/VirtualGoat/Twitter-Data-Mining/blob/master/final.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="nowAVUAA7Ciz" colab_type="code" outputId="6c7a781a-147e-44f6-be35-2985f705ed57" colab={"base_uri": "https://localhost:8080/", "height": 92}
#Accessing the data that has been stored.
from google.colab import drive
import pickle
drive.mount('/content/drive')
DATA_PATH = "/content/drive/My Drive/Colab Notebooks/Internship/Tweet Data/New/Thane/Passive"
# dict1: "passive updated" pickled mapping (presumably user -> count; verify against producer notebook).
users4=open(DATA_PATH+'/passiveupdated.pickle','rb')
dict1=pickle.load(users4)
users4.close()
print(len(dict1))
print(dict1)

# + id="C2StZoqE904S" colab_type="code" outputId="07d3ab1f-725a-420b-b4ec-d3cb1be320da" colab={"base_uri": "https://localhost:8080/", "height": 74}
# dict2: the full "passive" pickled mapping.
users5=open('/content/drive/My Drive/Colab Notebooks/Internship/Tweet Data/New/Thane/Passive/passivefull.pickle','rb')
dict2=pickle.load(users5)
users5.close()
print(len(dict2))
print(dict2)

# + id="AHuSI3zy5S5t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 74} outputId="041faa99-7f06-4517-c66b-fcc9b4e7e09a"
# Keys present in BOTH pickles (set() over a dict takes its keys).
dict11 = set(dict1)
dict22 = set(dict2)
# for name in dict11.intersection(dict22):
#     print(name)
y=dict11.intersection(dict22)
print(len(y))
print(y)

# + id="lXbBwOB-538A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f553ddcd-01d8-4287-800f-270539a5e78a"
# Remove the overlap from dict2 so the upcoming merge does not double-count.
for i in y:
    del dict2[i]
print(len(dict2))

# + id="kxUMYcGn6ec4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="da15f80c-791b-4553-b840-8fe0f8a8a440"
print(dict2)

# + id="Xg5QUn-p7Jqn" colab_type="code" outputId="9960f84f-223b-4080-8883-32c864f0a9e9" colab={"base_uri": "https://localhost:8080/", "height": 70}
# Merge dict1 into dict2 in place (dict.update returns None, so Merge's
# return value is None; the side effect on dict2 is what matters here).
def Merge(dict1, dict2):
    return(dict2.update(dict1))

Merge(dict1, dict2)
print(len(dict2))
print(dict2)

# + id="7womY8dZ_dKd" colab_type="code" outputId="c944b366-36b6-447d-a478-8ade778023a3" colab={"base_uri": "https://localhost:8080/", "height": 55}
# NOTE(review): sorted from dict1 (not the merged dict2) — confirm intended.
desc={k: v for k, v in sorted(dict1.items(), key=lambda item: item[1],reverse=True)}
print(desc)

# + id="tHt8pd5q9jmu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="682d2081-caaf-4b76-f991-0260018c856e"
print(len(desc))

# + id="GJVZypFjACCl" colab_type="code" colab={}
for k,v in desc.items():
    print(k.lower())

# + id="ZSQm6qfNEcP3" colab_type="code" outputId="fe6f1f4d-62ab-40d7-9f92-79b6f25caa02" colab={"base_uri": "https://localhost:8080/", "height": 74}
# Substring blacklist: accounts whose (lowercased) name contains any of these
# fragments are treated as businesses/spam and dropped from `desc`.
no=['decor','realt','digi','acre','resid','stock','group','article','project','commerc','brick','india','guru','new','market',
    'enterp','vastu','astro','estate','mall','hous','Thane','sky','room','home','flat','design','interior','furniture','trend',
    'pack','build','work','propert','prop','vastu','gym','loan','luxury','bloomberg','business','direct','time',
    'revenue','day','bank','agen','job','minis','corpo','tech',
    'boss','nation','associat','develop','broth','invest','people','dna','media','break','bombay','mumbai','thane','lodha','hiranandani','world','estate','logis','team',
    'consul','connec','school','tour','finan','asia','multi','apart','hospi','floor','repor','fashion','exhibition','trip','classif','wall','football','cricket',
    'welfare','history','income','height','shop','global','hotel','restaurant','office','environ','proje','wood','rent','deal','sale','press','craig',
    'bandra','pepper','noise']
from copy import deepcopy
# Iterate over a deep copy so deleting from `desc` while looping is safe.
desc1=deepcopy(desc)
k=0
jo=['wood']
for i,j in desc1.items():
    if any(word in i.lower() for word in no) == True:
        print(i)
        del desc[i]
# print(k)
print(len(desc))
print(desc)

# + id="pMqdbiY9Odum" colab_type="code" colab={}
# Manual removal of a single account (raises KeyError if already absent).
del dict1['evablon']

# + id="hk-AoDM2OQlO" colab_type="code" colab={}
# Persist the filtered ranking.
users5=open('/content/drive/My Drive/Colab Notebooks/Internship/Tweet Data/New/Thane/Active/activelatest.pickle','wb')
real_tweets4=pickle.dump(desc,users5)
users5.close()

# + id="SSGvtvvcHJsE" colab_type="code" outputId="8a6bff69-615a-4d00-879d-9ffcfe029fef" colab={"base_uri": "https://localhost:8080/", "height": 589}
# Print the first 31 account names of the ranking.
u=0
for k,v in desc.items():
    if u<=30:
        u=u+1
        print(k)
final.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: fengine
#     language: python
#     name: fengine
# ---

# ## Assembling a Feature Engineering Pipeline with Feature-engine and Cross-validation
#
# In this notebook, I will show you how to use Feature-engine within the Scikit-learn pipeline, utilizing cross-validation.
#
# I will:
# - build a gradient boosted tree
# - use feature-engine for the feature engineering steps
# - set up an entire engineering and prediction pipeline using a Scikit-learn Pipeline
# - train the pipeline with cross-validation, looking over different feature-engineering parameters
#
# ============================================================================
#
# ## In this demo:
#
# We will use the titanic dataset, please refer to lecture **Datasets** in Section 1 of the course for instructions on how to download and prepare this dataset.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# for the model
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline

# for feature engineering
from feature_engine import imputation as mdi
from feature_engine import discretisation as dsc
from feature_engine import encoding as ce

# +
# load dataset
cols = [
    'pclass', 'sex', 'age', 'sibsp', 'parch', 'fare', 'cabin', 'embarked',
    'survived'
]

data = pd.read_csv('../titanic.csv', usecols=cols)
data.head()

# +
# Cabin: extract numerical and categorical part and delete original variable.
# Raw string fixes the invalid '\d' escape of the original pattern.
data['cabin_num'] = data['cabin'].str.extract(r'(\d+)')  # captures numerical part
data['cabin_num'] = data['cabin_num'].astype('float')
data['cabin_cat'] = data['cabin'].str[0]  # captures the first letter

data.drop(['cabin'], axis=1, inplace=True)

# +
# make list of variables types
# numerical: discrete and continuous
discrete = [
    var for var in data.columns
    if data[var].dtype != 'O' and var != 'survived' and data[var].nunique() < 10
]
continuous = [
    var for var in data.columns
    if data[var].dtype != 'O' and var != 'survived' and var not in discrete
]

# categorical
categorical = [var for var in data.columns if data[var].dtype == 'O']

print('There are {} discrete variables'.format(len(discrete)))
print('There are {} continuous variables'.format(len(continuous)))
print('There are {} categorical variables'.format(len(categorical)))

# +
# separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(
    data.drop('survived', axis=1),  # predictors
    data['survived'],  # target
    test_size=0.1,  # percentage of obs in test set
    random_state=0)  # seed to ensure reproducibility

X_train.shape, X_test.shape
# -

# ### Set up the pipeline

titanic_pipe = Pipeline([

    # missing data imputation - section 4
    ('imputer_num',
     mdi.ArbitraryNumberImputer(arbitrary_number=-1,
                                variables=['age', 'fare', 'cabin_num'])),
    ('imputer_cat',
     mdi.CategoricalImputer(variables=['embarked', 'cabin_cat'])),

    # categorical encoding - section 6
    ('encoder_rare_label',
     ce.RareLabelEncoder(tol=0.01,
                         n_categories=2,
                         variables=['embarked', 'cabin_cat'])),
    ('categorical_encoder',
     ce.OrdinalEncoder(encoding_method='ordered',
                       variables=['cabin_cat', 'sex', 'embarked'])),

    # Gradient Boosted machine
    ('gbm', GradientBoostingClassifier(random_state=0))
])

# ## Set up the Grid and the cross-validation strategy

# +
# now we create the grid with all the parameters that we would like to test
param_grid = {
    # try different feature engineering parameters
    'imputer_num__arbitrary_number': [-1, 99],
    'encoder_rare_label__tol': [0.1, 0.2],
    'categorical_encoder__encoding_method': ['ordered', 'arbitrary'],

    # try different gradient boosted tree model parameters
    'gbm__max_depth': [None, 1, 3],
}

# now we set up the grid search with cross-validation
grid_search = GridSearchCV(titanic_pipe, param_grid,
                           cv=5, n_jobs=-1, scoring='roc_auc')

# cv=5 is the number of cross-validation folds
# n_jobs=-1 indicates to use all available cpus
# scoring='roc_auc' indicates to evaluate the model performance with the roc-auc

# for more details in the grid parameters visit:
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html

# +
# and now we train over all the possible combinations of the parameters above
grid_search.fit(X_train, y_train)

# and we print the best score over the train set
print(("best roc-auc from grid search: %.3f"
       % grid_search.score(X_train, y_train)))
# -

# we can print the best estimator parameters like this
grid_search.best_estimator_

# and find the best fit parameters like this
grid_search.best_params_

# here we can see all the combinations evaluated during the gridsearch
grid_search.cv_results_['params']

# and here the scores for each of one of the above combinations
grid_search.cv_results_['mean_test_score']

# and finally let's check the performance over the test set
# (message fixed: the model is a gradient boosted classifier, not a
# linear regression as the original print claimed)
print(("best gbm roc-auc from grid search: %.3f"
       % grid_search.score(X_test, y_test)))

# +
# let's explore the importance of the features
importance = pd.Series(grid_search.best_estimator_['gbm'].feature_importances_)
importance.index = data.drop('survived', axis=1).columns
importance.sort_values(inplace=True, ascending=False)
importance.plot.bar(figsize=(12, 6))
13.03-Assembling-pipeline-with-crossvalidation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={} colab_type="code" id="_F_-mF6xYvNl"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")

# + colab={} colab_type="code" id="wmQtdBcrYvNw"
train = pd.read_csv('train_ltfs_finhack.csv')
test = pd.read_csv('test_ltfs_finhack.csv')

# + colab={} colab_type="code" id="Z06UPxNKYvN4"
# Keep untouched copies of the raw inputs.
train_og = train.copy()
test_og = test.copy()

# + colab={} colab_type="code" id="hT8jlr_EhspD" outputId="c3881d57-2e34-43df-a8bf-ae2029343fb8"
train.columns
# -

# Function to calculate missing values by column
def missing_values_table(df):
    """Return a table of columns with missing values (count and % of total),
    sorted worst-first, and print a short summary."""
    # Count and percentage of NaNs per column, side by side.
    nan_counts = df.isnull().sum()
    nan_pct = 100 * nan_counts / len(df)
    table = pd.concat([nan_counts, nan_pct], axis=1)

    # Give the two columns readable names.
    table = table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})

    # Keep only columns that actually have gaps, worst first, rounded.
    table = table[table.iloc[:, 1] != 0].sort_values(
        '% of Total Values', ascending=False).round(1)

    # Print some summary information (same text as before).
    print("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
          "There are " + str(table.shape[0]) +
          " columns that have missing values.")

    return table

# Missing values statistics
missing_values = missing_values_table(train)
missing_values.head(20)

# + colab={} colab_type="code" id="m-xYaVA4YvOA"
# Tag rows so train/test can be separated again after joint processing.
train['source'] = 'train'
test['source'] = 'test'

# + colab={} colab_type="code" id="Ce40iyzhYvOH"
alldata = pd.concat([train, test], axis=0)
# -
sns.distplot(alldata.disbursed_amount)

sns.distplot(alldata.ltv)

sns.distplot(alldata.asset_cost)

# + colab={} colab_type="code" id="Yw7pxO26YvOP"
# Impute the only column with missing values with its mode.
alldata['Employment.Type'].fillna(alldata['Employment.Type'].mode()[0], inplace=True)
alldata['Employment.Type'].value_counts()

# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="L1Z0M8evYvOX" outputId="7a5bee5f-1525-44f7-a290-9a34b9008708"
def _age_from_dob(dob):
    """Age (years, as of 2018) from a date string ending in a 2-digit year.

    BUGFIX: the original `118 - int(x[-2:])` mapped two-digit years of
    2000-2018 to impossible ages (e.g. '00' -> 118) — an artifact the
    notebook's own commented cells below point out. Years <= 18 are now
    treated as 20xx.
    """
    yy = int(dob[-2:])
    birth_year = 2000 + yy if yy <= 18 else 1900 + yy
    return 2018 - birth_year

alldata['Age'] = alldata['Date.of.Birth'].apply(_age_from_dob)
alldata['Age'].head()

# +
plt.style.use('fivethirtyeight')

# Plot the distribution of ages in years
plt.hist(alldata['Age'], edgecolor='k', bins=25)
plt.title('Age of Client'); plt.xlabel('Age (years)'); plt.ylabel('Count');

# +
plt.figure(figsize=(10, 8))

# KDE plot of loans that were repaid on time
sns.kdeplot(alldata.loc[alldata['loan_default'] == 0, 'Age'], label='target == 0')

# KDE plot of loans which were not repaid on time
sns.kdeplot(alldata.loc[alldata['loan_default'] == 1, 'Age'], label='target == 1')

# Labeling of plot
plt.xlabel('Age (years)'); plt.ylabel('Density'); plt.title('Distribution of Ages');

# +
# Age information into a separate dataframe (.copy() avoids the
# chained-assignment warning; removed the no-op `age_data['Age'] =
# age_data['Age']` line of the original).
age_data = alldata[['loan_default', 'Age']].copy()

# Bin the age data
age_data['YEARS_BINNED'] = pd.cut(age_data['Age'], bins=np.linspace(20, 70, num=11))
age_data.head(10)
# -

# Group by the bin and calculate averages
age_groups = age_data.groupby('YEARS_BINNED').mean()
age_groups

# +
plt.figure(figsize=(8, 8))

# Graph the age bins and the average of the target as a bar plot
plt.bar(age_groups.index.astype(str), 100 * age_groups['loan_default'])

# Plot labeling
plt.xticks(rotation=75); plt.xlabel('Age Group (years)'); plt.ylabel('Failure to Repay (%)')
plt.title('Failure to Repay by Age Group');

# +
#Statistical Info for Age
#alldata['Age'].describe()

# +
#Seems to be a zero in age
#alldata[alldata['Age']==118].head()

# +
# 984 count for Age=0
#alldata['Age'].value_counts()

# +
#alldata['Age'].mean()

# + colab={} colab_type="code" id="xmPiYCbeYvOl"
alldata.drop('Date.of.Birth', axis=1, inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="QMajD_8xYvOt" outputId="73057497-a248-4dd9-ea14-f3739bc9cee9"
# Keep only the whole-years part of strings like "2yrs 6mon"
# (months are deliberately discarded).
alldata['CREDIT.HISTORY.LENGTH'] = alldata['CREDIT.HISTORY.LENGTH'].apply(lambda x: x.split(' ')[0])
alldata['CREDIT.HISTORY.LENGTH'] = alldata['CREDIT.HISTORY.LENGTH'].apply(lambda x: x.split('yrs')[0])
alldata['CREDIT.HISTORY.LENGTH'].head()

# + colab={"base_uri": "https://localhost:8080/", "height": 289} colab_type="code" id="ISZ29iqrYvO4" outputId="15253530-92be-4590-8775-405e4ee3f3d2"
alldata['Credit_History'] = alldata['CREDIT.HISTORY.LENGTH'].map(int)
alldata['Credit_History'].head(15)

# + colab={"base_uri": "https://localhost:8080/", "height": 1071} colab_type="code" id="UMToseENawe9" outputId="fbc44ab7-540f-4b60-9234-3a660b6f2f83"
# NOTE(review): this result is only displayed, never assigned — if a
# binary has-history flag was intended, assign it to a column.
alldata['Credit_History'].map(lambda c: 1 if c >= 1 else 0)

# + colab={} colab_type="code" id="MqbOZRPOYvPF"
alldata.drop('CREDIT.HISTORY.LENGTH', axis=1, inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="R-cWOeCOYvPW" outputId="43b6d70c-b9a5-4c60-f8ef-982b02cb38de"
alldata.select_dtypes('object').apply(pd.Series.nunique, axis=0)

# + colab={"base_uri": "https://localhost:8080/", "height": 289} colab_type="code" id="0tOLVmJKYvPh" outputId="14950669-41b4-4dd1-e1c0-e5eaba418c35"
# Same whole-years extraction for average account age.
alldata['AVERAGE.ACCT.AGE'] = alldata['AVERAGE.ACCT.AGE'].apply(lambda x: x.split(' ')[0])
alldata['AVERAGE.ACCT.AGE'] = alldata['AVERAGE.ACCT.AGE'].apply(lambda x: x.split('yrs')[0])
alldata['Avg_acct_yrs'] = alldata['AVERAGE.ACCT.AGE'].map(int)
alldata['Avg_acct_yrs'].head(15)

# + colab={} colab_type="code" id="6qV6uMymYvPz"
alldata.drop('AVERAGE.ACCT.AGE', axis=1, inplace=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="Q3cNbyMHYvRF" outputId="fe91e3e8-49fb-4a73-ac7e-d606b9891a41"
# Combined loan-to-value over primary + secondary disbursed amounts.
alldata['cltv'] = (alldata['PRI.DISBURSED.AMOUNT'] + alldata['SEC.DISBURSED.AMOUNT']) / alldata['asset_cost']
alldata['cltv'].head()

# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="VJTZcpSmYvRS" outputId="6c3058f5-55fe-43e5-f54b-ac4b1900889a"
# Rescale ltv from percent to a 0-1 fraction.
alldata['ltv'] = alldata['ltv'] / 100
alldata['ltv'].head(10)

# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="npoXaflbYvSZ" outputId="42c09ac4-863b-4a94-f4d2-ad9e8c50aaba"
alldata.select_dtypes('object').apply(pd.Series.nunique, axis=0)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="J7hZf03MYvTN" outputId="67829049-3230-4100-b4c1-3f802635ee77"
alldata.shape

# + colab={} colab_type="code" id="aBWh8YcRYvTf"
from sklearn.preprocessing import LabelEncoder

var_mod = ['PERFORM_CNS.SCORE.DESCRIPTION', 'Employment.Type', 'DisbursalDate']
le = LabelEncoder()
for i in var_mod:
    # BUGFIX: the original encoded every column twice (its guard
    # `if i != 'loan_default'` is always true for var_mod); a single
    # fit_transform yields the same labels.
    alldata[i] = le.fit_transform(alldata[i])
# -

alldata['Current_pincode_ID'].nunique()

# #### From the Data Dictionary provided, Current_pincode_ID is the current pincode of the customer. I decided to use this information in feature engineering, under the assumption that 7096 customers applied for numerous loans.
# + colab={} colab_type="code" id="gSwhz6Exhsuw"
# The original notebook repeated the same ~25 `groupby(...).transform(...)`
# assignments by hand for five grouping keys (pincode, branch, supplier,
# employee, state). The specs below generate the exact same columns —
# identical names, values and creation order — without the copy-paste.

# (output column suffix, source column, aggregation) for the primary side.
_GROUP_FEATURE_SPECS = [
    ('disbursed_sum', 'disbursed_amount', 'sum'),
    ('pri_no_accts_sum', 'PRI.NO.OF.ACCTS', 'sum'),
    ('pri_no_accts_count', 'PRI.NO.OF.ACCTS', 'count'),
    ('pri_overdue_accts_sum', 'PRI.OVERDUE.ACCTS', 'sum'),
    ('pri_overdue_accts_count', 'PRI.OVERDUE.ACCTS', 'count'),
    ('pri_active_accts_sum', 'PRI.ACTIVE.ACCTS', 'sum'),
    ('pri_active_accts_count', 'PRI.ACTIVE.ACCTS', 'count'),
    ('new_accts_six_sum', 'NEW.ACCTS.IN.LAST.SIX.MONTHS', 'sum'),
    ('new_accts_six_count', 'NEW.ACCTS.IN.LAST.SIX.MONTHS', 'count'),
    ('delinq_accts_six_sum', 'DELINQUENT.ACCTS.IN.LAST.SIX.MONTHS', 'sum'),
    ('delinq_accts_six_count', 'DELINQUENT.ACCTS.IN.LAST.SIX.MONTHS', 'count'),
    ('pri_current_bal_sum', 'PRI.CURRENT.BALANCE', 'sum'),
    ('pri_disbursed_sum', 'PRI.DISBURSED.AMOUNT', 'sum'),
    ('pri_sanctioned_sum', 'PRI.SANCTIONED.AMOUNT', 'sum'),
    ('pri_instal_sum', 'PRIMARY.INSTAL.AMT', 'sum'),
    ('cns_score', 'PERFORM_CNS.SCORE', 'max'),
]

# Same idea for the secondary-account side.
_SEC_FEATURE_SPECS = [
    ('sec_no_accts_sum', 'SEC.NO.OF.ACCTS', 'sum'),
    ('sec_no_accts_count', 'SEC.NO.OF.ACCTS', 'count'),
    ('sec_overdue_accts_sum', 'SEC.OVERDUE.ACCTS', 'sum'),
    ('sec_overdue_accts_count', 'SEC.OVERDUE.ACCTS', 'count'),
    ('sec_active_accts_sum', 'SEC.ACTIVE.ACCTS', 'sum'),
    ('sec_active_accts_count', 'SEC.ACTIVE.ACCTS', 'count'),
    ('sec_current_bal_sum', 'SEC.CURRENT.BALANCE', 'sum'),
    ('sec_disbursed_sum', 'SEC.DISBURSED.AMOUNT', 'sum'),
    ('sec_sanctioned_sum', 'SEC.SANCTIONED.AMOUNT', 'sum'),
    ('sec_instal_sum', 'SEC.INSTAL.AMT', 'sum'),
]

def _add_group_features(df, prefix, key, include_asset_cost=False):
    """Add `<prefix>_<suffix>` aggregate columns grouped by `key` to `df`.

    include_asset_cost: only the pincode level also aggregates
    `asset_cost` (placed between the pri and sec blocks, exactly where
    the hand-written version created it).
    """
    for suffix, col, agg in _GROUP_FEATURE_SPECS:
        df[prefix + '_' + suffix] = df.groupby(key)[col].transform(agg)
    if include_asset_cost:
        df[prefix + '_asset_cost_sum'] = df.groupby(key)['asset_cost'].transform('sum')
    for suffix, col, agg in _SEC_FEATURE_SPECS:
        df[prefix + '_' + suffix] = df.groupby(key)[col].transform(agg)

# Pincode-level aggregates (the only level with asset_cost).
_add_group_features(alldata, 'pc', 'Current_pincode_ID', include_asset_cost=True)

# + colab={} colab_type="code" id="ODSLHekOlFw-"
# Combined primary + secondary totals at the pincode level.
alldata['total_pc_pri_no_accts_count'] = alldata['pc_pri_no_accts_count'] + alldata['pc_sec_no_accts_count']
alldata['total_pc_pri_no_accts_sum'] = alldata['pc_pri_no_accts_sum'] + alldata['pc_sec_no_accts_sum']
alldata['total_pc_disbursed'] = alldata['pc_pri_disbursed_sum'] + alldata['pc_sec_disbursed_sum']
alldata['total_pc_pri_instal_sum'] = alldata['pc_pri_instal_sum'] + alldata['pc_sec_instal_sum']
alldata['total_pc_cltv'] = alldata['total_pc_disbursed'] / alldata['pc_asset_cost_sum']

# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="MCHx8m08Ku1x" outputId="ec3093f9-a6d0-43f6-8c79-41f45c47c9a8"
# Flag "healthy" loan-to-value ratios (<= 80%).
alldata['ltv_good'] = alldata['ltv'].map(lambda c: 1 if c <= 0.80 else 0)
alldata['ltv_good'].head()

# + colab={} colab_type="code" id="nrT-SJfYhsu2"
# Branch-level aggregates.
_add_group_features(alldata, 'branch', 'branch_id')

# + colab={} colab_type="code" id="y03wxMsphsu7"
# Supplier-level aggregates.
_add_group_features(alldata, 'supp', 'supplier_id')

# + colab={} colab_type="code" id="9mxDa_d3hsvB"
# Employee-level aggregates.
_add_group_features(alldata, 'emp', 'Employee_code_ID')

# + colab={} colab_type="code" id="4z07mwKqhsvK"
# State-level aggregates.
_add_group_features(alldata, 'state', 'State_ID')

# + colab={} colab_type="code" id="E-pKqnIKuE7d"
alldata['pc_total_current_balance'] = alldata['pc_pri_current_bal_sum'] + alldata['pc_sec_current_bal_sum']
alldata['state_total_current_balance'] = alldata['state_pri_current_bal_sum'] + alldata['state_sec_current_bal_sum']
alldata['branch_total_current_balance'] = alldata['branch_pri_current_bal_sum'] + alldata['branch_sec_current_bal_sum']
alldata['supp_total_current_balance'] = alldata['supp_pri_current_bal_sum'] + alldata['supp_sec_current_bal_sum']
alldata['emp_total_current_balance'] = alldata['emp_pri_current_bal_sum'] + alldata['emp_sec_current_bal_sum']

# + colab={} colab_type="code" id="eamnFL0quy1V"
# BUGFIX: the original computed every *_total_disbursed (pc/supp/state/emp
# included) from the branch_* sums — a copy-paste error. Each total now
# uses the sums of its own grouping level.
alldata['pc_total_disbursed'] = alldata['pc_pri_disbursed_sum'] + alldata['pc_sec_disbursed_sum']
alldata['branch_total_disbursed'] = alldata['branch_pri_disbursed_sum'] + alldata['branch_sec_disbursed_sum']
alldata['supp_total_disbursed'] = alldata['supp_pri_disbursed_sum'] + alldata['supp_sec_disbursed_sum']
alldata['state_total_disbursed'] = alldata['state_pri_disbursed_sum'] + alldata['state_sec_disbursed_sum']
alldata['emp_total_disbursed'] = alldata['emp_pri_disbursed_sum'] + alldata['emp_sec_disbursed_sum']

# + colab={} colab_type="code" id="58gJUI7n_TF7"
# BUGFIX: "total paid" is disbursed minus outstanding balance. The
# original used that subtraction for pc and branch but *added* the two
# quantities for state/supp/emp; all five levels now use the same formula.
alldata['pc_total_paid'] = alldata['pc_total_disbursed'] - alldata['pc_total_current_balance']
alldata['state_total_paid'] = alldata['state_total_disbursed'] - alldata['state_total_current_balance']
alldata['branch_total_paid'] = alldata['branch_total_disbursed'] - alldata['branch_total_current_balance']
alldata['supp_total_paid'] = alldata['supp_total_disbursed'] - alldata['supp_total_current_balance']
alldata['emp_total_paid'] = alldata['emp_total_disbursed'] - alldata['emp_total_current_balance']

# + colab={} colab_type="code" id="PBET3i7z2Vqq"
alldata['total_branch_pri_no_accts_sum'] = alldata['branch_pri_no_accts_sum'] + alldata['branch_sec_no_accts_sum']
alldata['total_supp_pri_no_accts_sum'] = alldata['supp_pri_no_accts_sum'] + alldata['supp_sec_no_accts_sum']
alldata['total_state_pri_no_accts_sum'] = alldata['state_pri_no_accts_sum'] + alldata['state_sec_no_accts_sum']
# BUGFIX: the emp total summed the branch_* columns in the original.
alldata['total_emp_pri_no_accts_sum'] = alldata['emp_pri_no_accts_sum'] + alldata['emp_sec_no_accts_sum']

# + colab={} colab_type="code" id="q-WjC_yI1AcU"
alldata['pc_total_no_inq'] = alldata.groupby('Current_pincode_ID')['NO.OF_INQUIRIES'].transform('sum')
# Total number of credit inquiries per grouping level.
alldata['branch_total_no_inq'] = alldata.groupby('branch_id')['NO.OF_INQUIRIES'].transform('sum')
alldata['supp_total_no_inq'] = alldata.groupby('supplier_id')['NO.OF_INQUIRIES'].transform('sum')
alldata['state_total_no_inq'] = alldata.groupby('State_ID')['NO.OF_INQUIRIES'].transform('sum')
alldata['emp_total_no_inq'] = alldata.groupby('Employee_code_ID')['NO.OF_INQUIRIES'].transform('sum')

# + colab={} colab_type="code" id="_D2vg00n1i7e"
# Combined (primary + secondary) overdue accounts per grouping level.
for lvl in ('pc', 'branch', 'supp', 'state', 'emp'):
    alldata[lvl + '_total_overdue_accts'] = (
        alldata[lvl + '_pri_overdue_accts_sum'] + alldata[lvl + '_sec_overdue_accts_sum'])

# + colab={} colab_type="code" id="XnwJfNUy2GsC"
# Share of overdue accounts in the total account count per level.
for lvl in ('pc', 'branch', 'supp', 'state', 'emp'):
    alldata[lvl + '_no_overdue_by_total_accts'] = (
        alldata[lvl + '_total_overdue_accts'] / alldata['total_' + lvl + '_pri_no_accts_sum'])

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="P8E3Rsylhsvj" outputId="5f37fc78-2315-477d-c9fd-00677bce8f63"
alldata.shape
# -

# ###### We added quite a few variables using this method. My first approach I added the PRI and SEC totals and numerous other combonations. LGBM would not break that 0.65ish mark.
# After checking feature_importances these variables alone would always be at the top of the list. So I thought creating more variables from them would maybe help the model. I finally was able to break the 0.66 mark on the public LB during the last day.

# + colab={} colab_type="code" id="8UGC2rayqzjf"
# Log-transform the heavily skewed loan amount.
alldata['disbursed_amount'] = np.log(alldata['disbursed_amount'])

# + colab={} colab_type="code" id="C3Fq5M7NYvT4"
# The row identifier carries no signal.
alldata.drop('UniqueID', axis = 1,inplace=True)

# + colab={} colab_type="code" id="L7fvECyRYvVE"
# Split the combined frame back into the original train/test parts.
train = alldata.loc[alldata['source']=='train']
test = alldata.loc[alldata['source']=='test']

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="u5qFej1kYvVL" outputId="8286d9dd-fc06-4aa9-f9a5-38b1448548bc"
train.shape, test.shape

# + colab={} colab_type="code" id="dTzUlYhVYvVZ"
train.drop('source',axis=1, inplace=True)
test.drop('source',axis=1, inplace=True)

# + colab={} colab_type="code" id="yVk5ZO7XhswQ"
#train.to_csv('train_ltfs_mod.csv', index=False)
#test.to_csv('test_ltfs_mod.csv', index=False)

# + colab={} colab_type="code" id="ckSaaxpJYvVg"
test = test.drop('loan_default', axis=1)

# + colab={} colab_type="code" id="4_pAjDJKYvVm"
# FIX: keyword `axis=` — the positional form is deprecated in newer pandas.
X = train.drop('loan_default', axis=1)
y = train.loan_default

# + colab={} colab_type="code" id="kiyCdimQYvWC"
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold

# + colab={} colab_type="code" id="2ZGjvc2kYvWL"
#params were fintuned using BayesianOpt
params={'num_leaves': 50,
        'learning_rate':0.1,
        'max_depth': 3,
        'objective':'binary',
        'metric':'auc',
        'num_threads':-1,
        'tree_learner': 'serial',
        'verbosity':1,
        'min_data_in_leaf': 1000,
        'min_sum_hessian_in_leaf':455
       }

# + colab={"base_uri": "https://localhost:8080/", "height": 1037} colab_type="code" id="6EO9ieKlYvWk" outputId="fb5c1624-ee7d-4ca2-cb5f-a19589a167d5"
# 5-fold stratified CV; train with early stopping on the held-out fold.
i=1
kf = StratifiedKFold(n_splits = 5, random_state =420, shuffle=True)
for train_index, test_index in kf.split(X,y):
    print('\n{} of kfold {}'.format(i,kf.n_splits))
    x_tr, x_val = X.iloc[train_index], X.iloc[test_index]
    y_tr, y_val = y[train_index], y[test_index]
    tr_data = lgb.Dataset(x_tr, label=y_tr)
    # BUG FIX: the validation Dataset was built twice from the same fold
    # (once from x_val/y_val, once from X.iloc[test_index]); keep one.
    val_data = lgb.Dataset(x_val, label=y_val)
    lgb_model1 = lgb.train(params,tr_data,25000,
                           valid_sets=[tr_data, val_data],
                           verbose_eval = 200,
                           early_stopping_rounds=500)
    # NOTE(review): overwritten each iteration — only the last fold's model
    # and out-of-fold predictions survive the loop. Confirm this is intended
    # (the test predictions below come from that last fold's model only).
    oof_lgb_pred_test = lgb_model1.predict(x_val)
    i += 1
lgb_pred_test = lgb_model1.predict(test)
# -

# #### This model scored our best.
# #### Public LB: 0.6604528139 which put us in 112th/1342.
# #### Private LB: 0.6631884673 for 118th/1342

# +
#Showing first 41 important features per LGBM
feature_imp = pd.DataFrame(sorted(zip(lgb_model1.feature_importance(),X.columns)), columns=['Value','Feature'])
plt.figure(figsize=(40, 20))
sns.set(font_scale = 3)
sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False)[0:40])
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.show()

# + colab={} colab_type="code" id="Zlp4yFITYvW9"
#submission = pd.read_csv('sample_submission_24jSKY6.csv')
#submission['loan_default']= lgb_pred_test
#submission['UniqueID'] = test_og['UniqueID']
#pd.DataFrame(submission, columns=['UniqueID', 'loan_default']).to_csv('best_finetune_1.csv', index=False)
LTFS_final_solution (1).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %config IPCompleter.greedy=True

# Reload the trained fraud-detection network and verify its metrics.
from keras.models import load_model
model = load_model("tf_fraud_detect_model")
model.summary()

# +
import pandas as pd
import numpy as np
data = pd.read_csv("creditcard.csv")
data.head()

# +
# Same train/validate/test split and scaling as used at training time.
# NOTE(review): this assumes identical random_state values were used when the
# model was trained — otherwise the "train" metrics below leak test rows.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

scalar = StandardScaler()
X = data.drop('Class', axis=1)
y = data.Class
X_train_v, X_test, y_train_v, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
X_train, X_validate, y_train, y_validate = train_test_split(X_train_v, y_train_v, test_size=0.2, random_state=42)
X_train = scalar.fit_transform(X_train)
X_validate = scalar.transform(X_validate)
X_test = scalar.transform(X_test)
# -

print(f"TRAINING: X_train: {X_train.shape}, y_train: {y_train.shape}\n{'_'*55}")
print(f"VALIDATION: X_validate: {X_validate.shape}, y_validate: {y_validate.shape}\n{'_'*50}")
print(f"TESTING: X_test: {X_test.shape}, y_test: {y_test.shape}")

# Add the trailing channel axis the network expects.
X_train_d = np.expand_dims(X_train, -1)
X_test_d = np.expand_dims(X_test, -1)
X_validate_d = np.expand_dims(X_validate, -1)

# +
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score


def print_score(label, prediction, train=True):
    """Print accuracy, precision, recall, F1 and the confusion matrix.

    label      -- ground-truth class labels
    prediction -- predicted class labels (already rounded to 0/1)
    train      -- True prints a "Train Result" header, False "Test Result"
    """
    # BUG FIX: the train branch previously computed the confusion matrix from
    # the global y_train instead of the `label` argument; the two branches
    # were otherwise identical, so they are collapsed into one code path.
    header = "Train Result" if train else "Test Result"
    print(f"{header}:\n================================================")
    print(f"Accuracy Score: {accuracy_score(label, prediction) * 100:.2f}%")
    print("_______________________________________________")
    print("Classification Report:", end='')
    print(f"\tPrecision Score: {precision_score(label, prediction) * 100:.2f}%")
    print(f"\t\t\tRecall Score: {recall_score(label, prediction) * 100:.2f}%")
    print(f"\t\t\tF1 score: {f1_score(label, prediction) * 100:.2f}%")
    print("_______________________________________________")
    print(f"Confusion Matrix: \n {confusion_matrix(label, prediction)}\n")


# +
y_train_pred = model.predict(X_train_d)
y_test_pred = model.predict(X_test_d)

print_score(y_train, y_train_pred.round(), train=True)
print_score(y_test, y_test_pred.round(), train=False)
# -

# model.predict(X_test_d)

X_test_d[0].flatten()

# NOTE(review): this passes a single sample without a batch axis — Keras
# usually expects X_test_d[:1] here; confirm the intended call.
model.predict(X_test_d[0])
ml/main/check_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 1 Implement the K-means algorithm
# ## NOTE: CHECK N_INIT

import numpy as np
import pandas as ps
from copy import deepcopy
from random import randint
from matplotlib import pyplot
from sklearn.datasets import make_blobs
import warnings
from skimage import io
import numpy as np
import numpy.matlib
import random
import scipy.misc
import os
import imageio
warnings.filterwarnings('ignore')


# +
def init_centroids(X, K):
    """Pick K random rows of X as initial centroids.

    Returns both the list of centroid rows and a (K, n) stacked array.
    """
    c = random.sample(list(X), K)
    cc = np.stack(c, axis=0)
    return c, cc


def closest_centroids(X, c):
    """Return, for each row of X, the index of its nearest centroid in c."""
    K = np.size(c, 0)
    # One column of squared distances per centroid; the empty first column
    # is a placeholder that is deleted after the loop.
    arr = np.empty((np.size(X, 0), 1))
    for i in range(0, K):
        y = c[i]
        temp = (np.ones((np.size(X, 0), 1)) * y)
        b = np.power(np.subtract(X, temp), 2)
        a = np.sum(b, axis=1)
        a = np.asarray(a)
        a.resize((np.size(X, 0), 1))
        arr = np.append(arr, a, axis=1)
    arr = np.delete(arr, 0, axis=1)
    idx = np.argmin(arr, axis=1)
    return idx


def compute_centroids(X, idx, K):
    """Return the (K, n) matrix of cluster means given assignments idx.

    NOTE(review): a cluster with no assigned points makes total_number 0 and
    produces a division-by-zero / NaN centroid — acceptable for this
    assignment but worth guarding in production code.
    """
    n = np.size(X, 1)
    centroids = np.zeros((K, n))
    for i in range(0, K):
        ci = idx == i
        ci = ci.astype(int)
        total_number = sum(ci)
        ci.resize((np.size(X, 0), 1))
        total_matrix = np.matlib.repmat(ci, 1, n)
        # (dead `ci = np.transpose(ci)` removed — its result was never used)
        total = np.multiply(X, total_matrix)
        centroids[i] = (1 / total_number) * np.sum(total, axis=0)
    return centroids


def run_kMean(X, initial_centroids, max_iters):
    """Run Lloyd's algorithm for at most max_iters iterations.

    Stops early when the centroids no longer move. Returns the final
    centroids and the per-point assignment indices.
    """
    m = np.size(X, 0)
    K = np.size(initial_centroids, 0)
    centroids = initial_centroids
    idx = np.zeros((m, 1))
    error = 1
    for i in range(max_iters):
        old_clusters = deepcopy(centroids)
        if error != 0:
            idx = closest_centroids(X, centroids)
            centroids = compute_centroids(X, idx, K)
            error = dist(centroids, old_clusters)  # check for convergence
        else:
            break
    return centroids, idx


def dist(a, b):
    """Frobenius norm of the difference between two centroid arrays."""
    return np.linalg.norm(a - b)


# -
def plot_in_col(x, number_of_clusters, p, new_clusters):
    """Scatter-plot each cluster in its own colour plus the centroids."""
    for i in range(number_of_clusters):
        col_points = np.array([x[n] for n in range(len(x)) if p[n] == i])
        pyplot.scatter(col_points[:, 0], col_points[:, 1], s=10)
    pyplot.scatter(new_clusters[:, 0], new_clusters[:, 1], marker='*', s=300, c='r')
    pyplot.title('K-means clusters')
    pyplot.xlabel('')
    pyplot.ylabel('')
    pyplot.show()


# +
def generate_fake_data(f_centers):
    """Generate a 2-D blob dataset with f_centers clusters (fixed seed)."""
    x, y = make_blobs(n_samples=5000, centers=f_centers, n_features=2, random_state=195)
    xx = deepcopy(x)
    yy = deepcopy(y)
    return f_centers, x, xx, yy


def plot_reg_data(x):
    """Plot the raw blobs before clustering."""
    # %matplotlib inline
    pyplot.scatter(x[:, 0], x[:, 1])
    pyplot.title('Initial blobs')
    pyplot.xlabel('')
    pyplot.ylabel('')
    pyplot.show()


def gen_rand_cluster(nub_cluster):
    # NOTE(review): `generate_random_clusters` is not defined anywhere in
    # this notebook — this helper raises NameError if called. It appears
    # unused below; confirm before deleting.
    n_features = 2
    c_pos = generate_random_clusters(n_features, nub_cluster)
    c_pos = np.array(c_pos).T
    return c_pos


def get_centroids(x, cent_pos):
    """Plot the data with the initial centroid positions overlaid."""
    pyplot.scatter(x[:, 0], x[:, 1])
    pyplot.scatter(cent_pos[:, 0], cent_pos[:, 1], marker='*', s=300, c='orange')
    pyplot.title('Initial centroids')
    pyplot.xlabel('')
    pyplot.ylabel('')
    pyplot.show()


# -
# Demo run: 4 blobs, 4 centroids, up to 50 iterations.
f_centers = 4
f_cent, x, xx, yy = generate_fake_data(f_centers)
print(f_cent)

plot_reg_data(x)

K=4
cc_pos,cc_display=init_centroids(x,K)
cc_pos

get_centroids(x, cc_display)

n_init=50
cent, idx = run_kMean(x, cc_pos, n_init)

plot_in_col(x, f_cent, idx, cent)

# +
# ELBOW
# +
'''
Generating new clusters and calculating distance
'''
nc_lst = [] # centroid pos
p_lst = [] # cluster pos
centroids_lst = [] # number of centroids per cluster
k=5
for i in range(1,k):
    f_cent, x, xx, yy = generate_fake_data(i)
    cc_pos,cc_display=init_centroids(x,K)
    cent, idx = run_kMean(x, cc_pos, n_init)
    # appending positions to list
    nc_lst.append(cent)
    p_lst.append(idx)
    centroids_lst.append(x)
# -


def dist_2d(a,b):
    """Plain Euclidean distance between two equal-length vectors."""
    squared_distance = 0
    for i in range(len(a)):
        squared_distance += (a[i] - b[i])**2
    dist = np.sqrt(squared_distance)
    return dist


sum_lst = []
# Accumulate a per-K "SSE" value for the home-made elbow plot.
# NOTE(review): dist_2d is applied to (centroid array, label array) pairs
# here, so this is a proxy for within-cluster SSE rather than the textbook
# definition — confirm against the sklearn comparison below.
for i in range(len(nc_lst)):
    # distance between centroid and respective cluster
    test_sse_1 = dist_2d(nc_lst[i], p_lst[i])
    # appending each distance to list
    sum_lst.append(test_sse_1)
print(sum_lst)

cc_sort = np.sort(nc_lst)
cent_sort = np.sort(centroids_lst)

''' getting lowes value '''
#for c in range(len(sum_lst)):
sum_sort = np.sort(np.array(sum_lst), axis=1)
sum_sort

# make one-dimn array and sort (descending)
x_test = np.concatenate(sum_sort)
x_test = np.sort(x_test)[::-1]
print(x_test)

# Log scale makes the "elbow" easier to spot.
pyplot.plot(np.log(x_test))
pyplot.title('Elbow method for optimal K')
pyplot.xlabel('K-value')
pyplot.ylabel('SSE')

# Block below is for validating our elbow method against sklearn's built in
# +
from sklearn.cluster import KMeans
from matplotlib import pyplot as plt

X = x
distorsions = []
for k in range(2, 20):
    k_means = KMeans(n_clusters=k)
    k_means.fit(X)
    distorsions.append(k_means.inertia_)

fig = plt.figure(figsize=(15, 5))
plt.plot(range(2, 20), distorsions)
plt.grid(True)
plt.title('Elbow curve Sklearn')
pyplot.xlabel('K-value')
pyplot.ylabel('SSE')
# -

# # 2 Apply the K-means algorithm to compress an image

image = io.imread('img_test.png')
io.imshow(image)
io.show()

rows = image.shape[0]
cols = image.shape[1]

# Normalise to [0, 1] and flatten to an (n_pixels, 3) RGB matrix so each
# pixel becomes one K-means sample.
image = image/255
X = image.reshape(image.shape[0]*image.shape[1],3)

K = 16 #number of clusters 16 start
max_iters = 50 #number of times the k-mean should run

initial_centroids, initial_centroids2 = init_centroids(X,K)

centroids,idx = run_kMean(X,initial_centroids,max_iters)
print(np.shape(centroids))
print(np.shape(idx))

# Replace every pixel by its cluster's centroid colour and restore shape.
idx = closest_centroids(X,centroids)
X_recovered = centroids[idx]
X_recovered = np.reshape(X_recovered, (rows, cols, 3))

imageio.imwrite('img_test_new16.png', X_recovered)

image_compressed = io.imread('img_test_new16.png')
io.imshow(image_compressed)
io.show()

# Compare on-disk sizes before/after palette compression.
info = os.stat('img_test.png')
print("size before compressed: ",info.st_size/1024,"KB")
info = os.stat('img_test_new16.png')
print("size after compressed K=16: ",info.st_size/1024,"KB")

'''
info = os.stat('img_test_new8.png')
print("size after compressed K=8: ",info.st_size/1024,"KB")
info = os.stat('img_test_new16.png')
print("size after compressed K=16: ",info.st_size/1024,"KB")
info = os.stat('img_test_new24.png')
print("size after compressed K=24: ",info.st_size/1024,"KB")
'''

# # 3 Use AIC and BIC to choose K for Gaussian Mixture Models

from sklearn.datasets import load_breast_cancer
data = load_breast_cancer().data
data

from sklearn.mixture import GaussianMixture

# GMM
gm = GaussianMixture(n_components=12, random_state=0).fit(data)
LL=gm.score_samples(data)


# Reference curves using sklearn's own AIC/BIC implementations.
def aic_bic_sk(K,data):
    aic=[]
    bic=[]
    for k in range(1,K):
        gm = GaussianMixture(n_components=k, random_state=0).fit(data)
        x=gm.aic(data)
        x2=gm.bic(data)
        aic.append(x)
        bic.append(x2)
    return aic,bic


aic,bic=aic_bic_sk(80,data)

x=range(79)
pyplot.plot(x,aic)
pyplot.plot(x,bic)
pyplot.title('Sk-learn AIC and BIC')
pyplot.xlabel('Number of clusters (K)')
pyplot.ylabel('Score')
pyplot.plot(aic, 'b-', label="AIC")
pyplot.plot(bic, 'r-', label="BIC")
pyplot.legend()
pyplot.show()


# + tags=[]
# Hand-rolled AIC/BIC approximation.
# NOTE(review): uses 2*k (resp. k*log(N)) as the parameter-count penalty
# instead of the true GMM parameter count, and N is hard-coded to 80; the
# author's own "# wrong" marker below acknowledges this is approximate.
def aic_bic(K,data):
    aic=[]
    bic=[]
    for k in range(1,K):
        gm = GaussianMixture(n_components=k, random_state=0).fit(data)
        LL=gm.score(data)
        s,h=data.shape
        N=80
        x=np.amax(LL)
        # AIC
        #return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters(X)
        #-2*x*s+2*x._n_
        a = -2*x+2*k # wrong
        # BIC
        # return (-2 * self.score(X) * X.shape[0] + self._n_parameters(X) * np.log(X.shape[0]))
        #b = -x+(1/2)*k*np.log(N)
        b = -2*x+k*np.log(N)
        bic.append(b*s)
        aic.append(a*s)
    return aic, bic
# -

print(data.shape)

# + tags=[]
kk = 80
aic2, bic2 = aic_bic(kk,data)
# -

print(aic2)
print(bic2)

pyplot.plot(x,aic2)
pyplot.plot(x,bic2)
pyplot.title('AIC and BIC')
pyplot.xlabel('Number of clusters (K)')
pyplot.ylabel('Score')
pyplot.plot(aic2, 'b-', label="AIC")
pyplot.plot(bic2, 'r-', label="BIC")
pyplot.legend()
pyplot.show()

# +
from sklearn import cluster
from scipy.spatial import distance
import sklearn.datasets
from sklearn.preprocessing import StandardScaler
import numpy as np


def compute_bic(kmeans,X):
    """Compute the BIC of a fitted sklearn KMeans model on data X."""
    # assign centers and labels
    centers = [kmeans.cluster_centers_]
    labels = kmeans.labels_
    #number of clusters
    m = kmeans.n_clusters
    # size of the clusters
    n = np.bincount(labels)
    #size of data set
    N, d = X.shape
    #compute variance for all clusters beforehand
    cl_var = (1.0 / (N - m) / d) * sum([sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], 'euclidean')**2) for i in range(m)])
    const_term = 0.5 * m * np.log(N) * (d+1)
    BIC = np.sum([n[i] * np.log(n[i]) -
                  n[i] * np.log(N) -
                  ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -
                  ((n[i] - 1) * d/ 2) for i in range(m)]) - const_term
    return(BIC)
# -
data science/DIT863 - stat/assignment_3/.ipynb_checkpoints/Assignment_3-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # KPMG TASK-1 import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # ### Exploring Transactions Sheet path=r'C:\Users\<NAME>\Downloads\KPMG_VI_New_raw_data_update_final.xlsx' df=pd.read_excel(path,sheet_name='Transactions') df.head() df.shape df.columns df.info() #convert date columns from integer to datetime df['transaction_date'] = pd.to_datetime(df['transaction_date'], unit='s') df['transaction_date'].head() #convert date columns from integer to datetime df['product_first_sold_date'] = pd.to_datetime(df['product_first_sold_date'], unit='s') df['product_first_sold_date'].head() df.describe() df.isnull().sum() # To Treat Missing Values, # The options we have are: # * **Drop Missing Values** # * **Impute Missing Values based on type of variable** # # We can decide on this during analysis based on objective. dups = df.duplicated() dups.sum() # ### Exploring Columns df['order_status'].value_counts() df['brand'].value_counts() df['product_line'].value_counts() df['product_class'].value_counts() df['product_size'].value_counts() df['transaction_id'].nunique() df['customer_id'].nunique() df.corr().T sns.heatmap(df.corr()) # ### Exploring Customer Demographic df2=pd.read_excel(path,sheet_name='CustomerDemographic') df2.head() df2.shape df2.columns df2.info() df2.describe() df2['customer_id'].nunique() df2.isnull().sum() # To Treat Missing Values, The options we have are: # # * **Drop Missing Values** # * **Impute Missing Values based on type of variable** # We can decide on this during analysis based on objective. 
df2['gender'].value_counts()

# Replace inconsistent values with appropriate value
df2['gender'] = df2['gender'].replace('F','Female').replace('M','Male').replace('Femal','Female').replace('U','Unspecified')

df2['gender'].value_counts()

df2['default'].value_counts()

# ### The data doesnt seem to be right to process,so lets drop this column

# BUG FIX: the drop result was previously discarded, so 'default' was never
# actually removed despite the stated intent; assign it back.
df2 = df2.drop('default',axis=1)

df2.head()

df2.corr()

# BUG FIX: this section explores df2 but the heatmap previously plotted
# df.corr() (the Transactions sheet) — a copy-paste slip.
sns.heatmap(df2.corr())

# ### Exploring Customer Address

df3=pd.read_excel(path,sheet_name='CustomerAddress')
df3.head()

df3.shape

df3.columns

df3.info()

df3.describe()

df3['customer_id'].nunique()

df3.isnull().sum()

# There are no missing values here

df3['address'].value_counts()

df3['postcode'].value_counts()

df3['state'].value_counts()

df3['country'].value_counts()

df3['property_valuation'].value_counts()

df3.corr().T

# BUG FIX: same copy-paste slip — plot df3, not df.
sns.heatmap(df3.corr())
KPMG-TASK1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import scipy.sparse as sps
import numpy as np
from matplotlib import pyplot

train_final = pd.read_csv('../../input/train_final.csv', sep='\t')
train_final['interaction'] = 1
train_final = train_final.sort_values(['playlist_id', 'track_id'], ascending=[True, True])
numberInteractions = train_final.shape[0]
print(train_final.iloc[:20,])

# we split each column into seperate list
playlist_id = list(train_final['playlist_id'])
track_id = list(train_final['track_id'])
interaction = list(train_final['interaction'])
print(playlist_id[0:10])
print(track_id[0:10])
print(interaction[0:10])

# +
playlist_id_unique = list(set(playlist_id))
track_id_unique = list(set(track_id))

numPlaylists = len(playlist_id_unique)
numTracks = len(track_id_unique)

print ("Number of items\t {}, Number of users\t {}".format(numTracks, numPlaylists))
print ("Max ID items\t {}, Max Id users\t {}\n".format(max(track_id_unique), max(playlist_id_unique)))
print ("Average interactions per user {:.2f}".format(numberInteractions/numPlaylists))
print ("Average interactions per item {:.2f}\n".format(numberInteractions/numTracks))
print ("Sparsity {:.2f} %".format((1-float(numberInteractions)/(numTracks*numPlaylists))*100))

# +
# User-Rating Matrix: rows = playlists (users), columns = tracks (items).
URM_all = sps.coo_matrix((interaction, (playlist_id, track_id)))
URM_all
# -

# BUG FIX: tocsr() returns a new matrix; the original call discarded the
# result, leaving URM_all in COO format.
URM_all = URM_all.tocsr()

# #### Calculate item popularity

itemPopularity = (URM_all>0).sum(axis=0)
itemPopularity

itemPopularity = np.array(itemPopularity).squeeze()
itemPopularity

itemPopularity = np.sort(itemPopularity)
itemPopularity

pyplot.plot(itemPopularity, 'ro')
pyplot.ylabel('Num Interactions ')
pyplot.xlabel('Item Index')
pyplot.show()

# +
tenPercent = int(numTracks/10)

print("Average per-item interactions over the whole dataset {:.2f}".
      format(itemPopularity.mean()))

# BUG FIX: the original indexed a single element ([-tenPercent] and
# [:tenPercent] on the "top" line) instead of slicing the top-10% range.
print("Average per-item interactions for the top 10% popular items {:.2f}".
      format(itemPopularity[-tenPercent:].mean()))

print("Average per-item interactions for the least 10% popular items {:.2f}".
      format(itemPopularity[:tenPercent].mean()))

print("Average per-item interactions for the median 10% popular items {:.2f}".
      format(itemPopularity[int(numTracks*0.45):int(numTracks*0.55)].mean()))
# -

print("Number of items with zero interactions {}".
      format(np.sum(itemPopularity==0)))

# +
itemPopularityNonzero = itemPopularity[itemPopularity>0]

tenPercent = int(len(itemPopularityNonzero)/10)

print("Average per-item interactions over the whole dataset {:.2f}".
      format(itemPopularityNonzero.mean()))

# BUG FIX: slice instead of single-element index, as above.
print("Average per-item interactions for the top 10% popular items {:.2f}".
      format(itemPopularityNonzero[-tenPercent:].mean()))

print("Average per-item interactions for the least 10% popular items {:.2f}".
      format(itemPopularityNonzero[:tenPercent].mean()))

print("Average per-item interactions for the median 10% popular items {:.2f}".
      format(itemPopularityNonzero[int(numTracks*0.45):int(numTracks*0.55)].mean()))
# -

pyplot.plot(itemPopularityNonzero, 'ro')
pyplot.ylabel('Num Interactions ')
pyplot.xlabel('Item Index')
pyplot.show()

# #### User activity

# +
userActivity = (URM_all>0).sum(axis=1)
userActivity = np.array(userActivity).squeeze()
userActivity = np.sort(userActivity)

pyplot.plot(userActivity, 'ro')
pyplot.ylabel('Num Interactions ')
pyplot.xlabel('User Index')
pyplot.show()
# -

# #### Let's start with a random recommender

class RandomRecommender(object):
    """Baseline recommender: returns `at` uniformly random item ids."""

    def fit(self, URM_train):
        # BUG FIX: shape[0] is the number of users (playlists); items live
        # on axis 1, so random recommendations must be drawn from shape[1].
        self.numItems = URM_train.shape[1]

    def recommend(self, user_id, at=5):
        recommended_items = np.random.choice(self.numItems, at)
        return recommended_items


# ### In order to evaluate our recommender we have to define:
# * A splitting of the data in URM_train and URM_test
# * An evaluation metric
# * A functon computing the evaluation for each user
#
# ### The splitting of the data is very important to ensure your algorithm is evaluated in a realistic scenario by using test it has never seen.
# +
# Random 80/20 interaction-level holdout split.
train_test_split = 0.80

numInteractions = URM_all.nnz

train_mask = np.random.choice([True,False], numInteractions, p=[train_test_split, 1-train_test_split])
train_mask.shape
# -

unique, counts = np.unique(train_mask, return_counts=True)
dict(zip(unique, counts))

# +
playlist_id = np.array(playlist_id)
track_id = np.array(track_id)
interaction = np.array(interaction)

URM_train = sps.coo_matrix((interaction[train_mask], (playlist_id[train_mask], track_id[train_mask])))
URM_train = URM_train.tocsr()
URM_train

# +
test_mask = np.logical_not(train_mask)

URM_test = sps.coo_matrix((interaction[test_mask], (playlist_id[test_mask], track_id[test_mask])))
URM_test = URM_test.tocsr()
URM_test
# -

# ### Evaluation metric

playlist = playlist_id_unique[1]
playlist

# +
randomRecommender = RandomRecommender()
randomRecommender.fit(URM_train)

recommended_items = randomRecommender.recommend(playlist, at=5)
recommended_items
# -

# #### We call items in the test set 'relevant'

relevant_items = URM_test[playlist].indices
relevant_items

type(URM_test)

is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
is_relevant


# ### Precision: how many of the recommended items are relevant

def precision(recommended_items, relevant_items):
    """Fraction of recommended items that appear in the relevant set."""
    is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
    precision_score = np.sum(is_relevant, dtype=np.float32) / len(is_relevant)
    return precision_score


# ### Recall: how many of the relevant items I was able to recommend

def recall(recommended_items, relevant_items):
    """Fraction of relevant items that were actually recommended."""
    is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
    recall_score = np.sum(is_relevant, dtype=np.float32) / relevant_items.shape[0]
    return recall_score


# ### Mean Average Precision

def MAP(recommended_items, relevant_items):
    """Average precision of one recommendation list (MAP once averaged over users)."""
    is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
    # Cumulative sum: precision at 1, at 2, at 3 ...
    p_at_k = is_relevant * np.cumsum(is_relevant, dtype=np.float32) / (1 + np.arange(is_relevant.shape[0]))
    map_score = np.sum(p_at_k) / np.min([relevant_items.shape[0], is_relevant.shape[0]])
    return map_score


# ### And let's test it!

# +
# We pass as paramether the recommender class

def evaluate_algorithm(URM_test, recommender_object, at=5):
    """Average precision/recall/MAP of `recommender_object` over all playlists.

    Iterates the global playlist_id_unique list and skips users with no
    interactions in URM_test; prints the three averaged metrics.
    """
    cumulative_precision = 0.0
    cumulative_recall = 0.0
    cumulative_MAP = 0.0
    num_eval = 0

    for user_id in playlist_id_unique:
        relevant_items = URM_test[user_id].indices
        if len(relevant_items)>0:
            recommended_items = recommender_object.recommend(user_id, at=at)
            num_eval+=1
            cumulative_precision += precision(recommended_items, relevant_items)
            cumulative_recall += recall(recommended_items, relevant_items)
            cumulative_MAP += MAP(recommended_items, relevant_items)

    cumulative_precision /= num_eval
    cumulative_recall /= num_eval
    cumulative_MAP /= num_eval

    print("Recommender performance is: Precision = {:.4f}, Recall = {:.4f}, MAP = {:.4f}".format(
        cumulative_precision, cumulative_recall, cumulative_MAP))
# -

evaluate_algorithm(URM_test, randomRecommender)


# # Top Popular recommender
#
# #### We recommend to all users the most popular items, that is those with the highest number of interactions
# #### In this case our model is the item popularity

class TopPopRecommender(object):
    """Recommends the globally most-interacted items to every user."""

    def fit(self, URM_train):
        itemPopularity = (URM_train>0).sum(axis=0)
        itemPopularity = np.array(itemPopularity).squeeze()
        # We are not interested in sorting the popularity value,
        # but to order the items according to it
        self.popularItems = np.argsort(itemPopularity)
        self.popularItems = np.flip(self.popularItems, axis = 0)

    def recommend(self, user_id, at=5):
        # Same top-`at` list for every user (user_id is ignored).
        recommended_items = self.popularItems[0:at]
        return recommended_items


topPopRecommender = TopPopRecommender()
topPopRecommender.fit(URM_train)

for user_id in playlist_id_unique[0:10]:
    print(topPopRecommender.recommend(user_id, at=5))

evaluate_algorithm(URM_test, topPopRecommender, at=5)


# +
# Redefinition: same TopPop model but with the option to filter out items
# the user already has in URM_train.
class TopPopRecommender(object):

    def fit(self, URM_train):
        self.URM_train = URM_train

        itemPopularity = (URM_train>0).sum(axis=0)
        itemPopularity = np.array(itemPopularity).squeeze()

        # We are not interested in sorting the popularity value,
        # but to order the items according to it
        self.popularItems = np.argsort(itemPopularity)
        self.popularItems = np.flip(self.popularItems, axis = 0)

    def recommend(self, user_id, at=5, remove_seen=True):
        if remove_seen:
            # Keep only items the user has NOT interacted with in training.
            unseen_items_mask = np.in1d(self.popularItems, self.URM_train[user_id].indices,
                                        assume_unique=True, invert = True)
            unseen_items = self.popularItems[unseen_items_mask]
            recommended_items = unseen_items[0:at]
        else:
            recommended_items = self.popularItems[0:at]

        return recommended_items


# +
topPopRecommender_removeSeen = TopPopRecommender()
topPopRecommender_removeSeen.fit(URM_train)

for user_id in playlist_id_unique[0:10]:
    print(topPopRecommender_removeSeen.recommend(user_id, at=5))
# -

evaluate_algorithm(URM_test, topPopRecommender_removeSeen)
notebooks/PracticeSession/Practice2_work.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import littletable as lt
from IPython.display import HTML, display

# shortcut for displaying tables as HTML inside Notebook
_HTML = lambda tbl, fields='*': display(HTML(tbl.as_html(fields)))
# -

# import from csv, convert string fields to ints and floats
# (convert elevation from meters to feet)
# us_ppl.csv is an extract of population data from the GeoNames database of US place
# names (www.geonames.org), licensed under the Creative Commons Attribution 4.0 License
us_ppl = lt.Table().csv_import('us_ppl.csv',
                               transforms={'pop': int,
                                           'lat': float,
                                           'long': float,
                                           'elev': lambda s:int(int(s)*328/100+0.5)})

# define index by state, since we will do many analyses and pivots by state
# (may only pivot on indexed attributes)
us_ppl.create_index('state')

# display summary info about this table
us_ppl.info()

# drop columns that are not so interesting
us_ppl = us_ppl.select("-countycode -featclass -feature -tz -wikilink")

# display a table of the top 20 places in Texas by population
_HTML(us_ppl.by.state['TX'].sort("pop desc")[:20], "id name lat long elev pop")

# what are all the states in the input data file
us_ppl.by.state.keys()

# create a pivot table by state
piv = us_ppl.pivot("state")

# compute the total population by state, and output as a table
_HTML(piv.summarize(lambda recs: sum(r.pop for r in recs), "population"))

# compute the total population by state, and output as a table
piv.as_table(sum, 'pop') |> _HTML  # noqa — see next line for actual call
_HTML(piv.as_table(sum, 'pop').sort("pop desc"), "state pop")

# add a computed field 'elev000', elevation rounded down by 1000's
us_ppl.add_field('elev000', lambda x: int(x.elev/1000)*1000, 0)

# create index on elev000 and pivot
us_ppl.create_index('elev000')
piv = us_ppl.pivot('elev000')

# create a table of the summary counts of the pivot, add a label field and display as a table
ppl_by_elev = piv.as_table(sum, 'pop')
ppl_by_elev.add_field("elevation", lambda rec: "{}-{}".format(rec.elev000, rec.elev000+1000))
_HTML(ppl_by_elev, "elevation pop")

# summarize population by state and elevation
piv = us_ppl.pivot('state elev000')

# display table of residents in each state by elevation
_HTML(piv.summarize(lambda recs: sum(r.pop for r in recs)))

# what is the average resident elevation by state?
# (population-weighted mean elevation per state, truncated to int)
piv = us_ppl.pivot('state')
_HTML(piv.summarize(lambda recs: int(sum(r.pop*r.elev for r in recs)/sum(r.pop for r in recs)),
                    col_label='ave elevation'))
examples/Littletable PPL demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Statistics in Spreadsheets # # - 4 hours # - 15 Videos # - 51 Exercises # # ## Course Description # # Statistics is the science that deals with the collection, analysis, and interpretation of data. Having a solid foundation in statistics will help you effectively work with your data to test hypotheses and uncover insights that can help solve your problems. This course is designed to give you that foundation in statistics. Using Spreadsheets functions, you'll dive into averages, distributions, hypothesis testing, and conclude the course by applying your newfound knowledge in a case study. Along the way, you'll work with a variety of datasets ranging from eBay auctions to train ridership to historical presidential approval ratings. Enjoy! # # ### 1 Getting To Know Your Data # # Begin your journey by learning why and how to summarize your data using statistics such as the mean, median, and mode. While working with a variety of datasets ranging from Amazon revenue to U.S Presidential ratings, you'll learn about the differences between each of these fundamental statistics and most importantly, when to use each. # # - Welcome to the course! # - Don't be average! # - Presidential approval rating averages # - Difference between median and mean # - Modal madness # - How far from average? # - Train variation # - Calculating standard deviations # - Playing quarters # - Standardizing data # - Comparing z-scores # - Exploring eBay auctions # # ### 2 Statistical Data Visualization # # Data visualization is one of the most important parts of any data science workflow. It leads to a deeper understanding of your dataset which in turn allows you to more effectively communicate results to stakeholders. 
In this chapter, you'll learn how to visualize your data in Spreadsheets using statistical plots such as the histogram, scatter plot, and bar plot. # # - Visualizing Distributions # - "Normal" views of money # - Visualizing customer longevity # - Visualizing customer donations # - Is the data "normally" distributed? # - Visualizing Correlations # - Correlation between price and quantity sold # - Correlation between seller rating and closing price # - Adding a trend line # - Bar charts # - Bar chart of competitive counts # - Visualizing categories # # ### 3 Statistical Hypothesis Testing # # This chapter introduces you to statistical hypothesis testing. You'll learn how to construct a hypothesis, test it using different statistical tests, and properly interpret the results. # # - Central to Stats: Sampling! # - Sampling in Spreadsheets # - Does sampling size matter? # - Central Limit Theorem in action # - Hypothesis Testing # - Comparing samples with a t-test # - Paired t-test # - Hypothesis Testing with the Z-test # - Performing a Z-test # - What changes in a two-tailed test? # - Hypothesis Testing with the Chi-squared test # - Performing a chi-squared test # - Are bank loans getting worse? # # ### 4 Case Study: Dating Profile Analysis # # The final stretch! Apply all of your newfound statistical knowledge and solidify everything you have learned by working through a case study consisting of online dating profile data. # # - Dating Data! # - Understanding the distribution of ages # - What's the drinking age? # - Profile login behavior # - Visuals & Distributions # - Visualizing logins # - How old do users look? # - Tipping the scale to positive correlation # - Investigating age and volunteering # - More complex relationships # - Are gender and number of roommates independent? # - Getting old and rich # - Multiple relationships! # - Congratulations!
introduction_to_statistics_in_spreadsheets/summary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Summary: # This notebook contains the soft smoothing figures for Amherst (Figure 3(c)). # ## Load libraries # + # import packages from __future__ import division import networkx as nx import numpy as np import os from sklearn import metrics from sklearn.preprocessing import label_binarize from sklearn.metrics import f1_score from sklearn.metrics import roc_auc_score from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import ShuffleSplit import matplotlib.pyplot as plt import itertools from numpy.linalg import inv ## function to create + save dictionary of features def create_dict(key, obj): return(dict([(key[i], obj[i]) for i in range(len(key))])) # - # ## load helper functions and dataset # + # set the working directory and import helper functions #get the current working directory and then redirect into the functions under code cwd = os.getcwd() # parents working directory of the current directory: which is the code folder parent_cwd = os.path.dirname(cwd) # get into the functions folder functions_cwd = parent_cwd + '/functions' # change the working directory to be .../functions os.chdir(functions_cwd) # import all helper functions exec(open('parsing.py').read()) exec(open('ZGL.py').read()) exec(open('create_graph.py').read()) exec(open('ZGL_softing_new_new.py').read()) exec(open('one_hop_majority_vote.py').read()) exec(open('iterative_method_test.py').read()) #exec(open('iterative_method_test_new.py').read()) exec(open('decoupling_prepare.py').read()) # import the data from the data folder data_cwd = os.path.dirname(parent_cwd)+ '/data' # change the working directory and import the fb dataset fb100_file = data_cwd +'/Amherst41' A, metadata = parse_fb100_mat_file(fb100_file) # change A(scipy csc matrix) 
into a numpy matrix adj_matrix_tmp = A.todense() #get the gender for each node(1/2,0 for missing) gender_y_tmp = metadata[:,1] # get the corresponding gender for each node in a disctionary form gender_dict = create_dict(range(len(gender_y_tmp)), gender_y_tmp) (graph, gender_y) = create_graph(adj_matrix_tmp,gender_dict,'gender',0,None,'yes') # - # ## Setup percent_initially_unlabelled = [0.99,0.95,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,0.05] percent_initially_labelled = np.subtract(1, percent_initially_unlabelled) n_iter = 10 cv_setup='stratified' # ## 1-hop and 2-hop MV method # + # 1-hop and 2-hop initialization A = nx.adjacency_matrix(graph) adj_matrix_gender = np.matrix(A.todense()) n = len(gender_y) keys = list(graph.node) # see how many classes are there and rearrange them classes = np.sort(np.unique(gender_y)) class_labels = np.array(range(len(classes))) # relabel membership class labels - for coding convenience # preserve ordering of original class labels -- but force to be in sequential order now gender_y_update = np.copy(gender_y) for j in range(len(classes)): gender_y_update[gender_y_update == classes[j]] = class_labels[j] gender_y_update = np.array(gender_y_update) mean_accuracy_one = [] se_accuracy_one = [] mean_micro_auc_one = [] se_micro_auc_one = [] mean_wt_auc_one = [] se_wt_auc_one = [] mean_accuracy_two = [] se_accuracy_two = [] mean_micro_auc_two = [] se_micro_auc_two = [] mean_wt_auc_two = [] se_wt_auc_two = [] for i in range(len(percent_initially_labelled)): print(percent_initially_unlabelled[i]) (graph_new, gender_y_new) = create_graph(adj_matrix_tmp,gender_dict,'gender',0,None,'yes') adj_matrix_gender = np.array(nx.adjacency_matrix(graph_new).todense()) gender_dict_input = create_dict(range(len(gender_y_new)), gender_y_new) (graph_input, gender_y_input) = create_graph(adj_matrix_gender,gender_dict_input,'gender',5,None,'yes') keys = list(graph_input.node()) bench_mark = np.mean(np.array(gender_y_input) == np.max(class_labels)) if 
cv_setup=='stratified': k_fold = StratifiedShuffleSplit(n_splits=n_iter,test_size=percent_initially_unlabelled[i], random_state=1) else: k_fold = cross_validation.ShuffleSplit(n_splits=n_iter, test_size=percent_initially_unlabelled[i], random_state=1) accuracy_one = [] micro_auc_one = [] wt_auc_one = [] accuracy_two = [] micro_auc_two = [] wt_auc_two = [] # update rule for theta for train_index, test_index in k_fold.split(keys, gender_y_input): accuracy_score_benchmark = np.mean(np.array(gender_y_input)[train_index] == np.max(class_labels)) # get 1-hop result (theta_one_tmp, micro_auc_one_tmp, wt_auc_one_tmp, accuracy_one_tmp) = one_hop_majority_vote(graph_input, gender_y_input,train_index, test_index) accuracy_one.append(accuracy_one_tmp) micro_auc_one.append(micro_auc_one_tmp) wt_auc_one.append(wt_auc_one_tmp) # get 2-hop result bench_mark = gender_y_input/len(gender_y_input) (theta_two_tmp, micro_auc_two_tmp, wt_auc_two_tmp, accuracy_two_tmp) = iterative_method_test(1, graph_input, gender_y_input, gender_y_input, train_index, test_index, bench_mark) accuracy_two.append(accuracy_two_tmp) micro_auc_two.append(micro_auc_two_tmp) wt_auc_two.append(wt_auc_two_tmp) # get the mean and standard deviation mean_accuracy_one.append(np.mean(accuracy_one)) se_accuracy_one.append(np.std(accuracy_one)) mean_micro_auc_one.append(np.mean(micro_auc_one)) se_micro_auc_one.append(np.std(micro_auc_one)) mean_wt_auc_one.append(np.mean(wt_auc_one)) se_wt_auc_one.append(np.std(wt_auc_one)) # get the mean and standard deviation mean_accuracy_two.append(np.mean(accuracy_two)) se_accuracy_two.append(np.std(accuracy_two)) mean_micro_auc_two.append(np.mean(micro_auc_two)) se_micro_auc_two.append(np.std(micro_auc_two)) mean_wt_auc_two.append(np.mean(wt_auc_two)) se_wt_auc_two.append(np.std(wt_auc_two)) # - # ## decoupled smoothing method (with parameter 0.1) # + sigma_square = 0.1 (graph, gender_y) = create_graph(adj_matrix_tmp,gender_dict,'gender',0,None,'yes') A_tilde = 
decoupling_prepare(graph,sigma_square) (mean_accuracy_decoupling_Amherst_01,se_accuracy_decoupling_Amherst_01,mean_micro_auc_decoupling_Amherst_01,se_micro_auc_decoupling_Amherst_01,mean_wt_auc_decoupling_Amherst_01,se_wt_auc_decoupling_Amherst_01) = ZGL(np.array(A_tilde),gender_y,percent_initially_unlabelled, n_iter, cv_setup) # - # ## hard smoothing method (ZGL) # + adj_matrix_tmp_ZGL = adj_matrix_tmp (graph, gender_y) = create_graph(adj_matrix_tmp_ZGL,gender_dict,'gender',0,None,'yes') # run ZGL (mean_accuracy_zgl_Amherst, se_accuracy_zgl_Amherst, mean_micro_auc_zgl_Amherst,se_micro_auc_zgl_Amherst, mean_wt_auc_zgl_Amherst,se_wt_auc_zgl_Amherst) =ZGL(np.array(adj_matrix_gender), np.array(gender_y),percent_initially_unlabelled, n_iter,cv_setup) # - # ## Plot # AUC against Initial unlabled node precentage # + # %matplotlib inline from matplotlib.ticker import FixedLocator,LinearLocator,MultipleLocator, FormatStrFormatter fig = plt.figure() #seaborn.set_style(style='white') from mpl_toolkits.axes_grid1 import Grid grid = Grid(fig, rect=111, nrows_ncols=(1,1), axes_pad=0.1, label_mode='L') for i in range(4): if i == 0: grid[i].xaxis.set_major_locator(FixedLocator([0,25,50,75,100])) grid[i].yaxis.set_major_locator(FixedLocator([0.4, 0.5,0.6,0.7,0.8,0.9,1])) grid[i].errorbar(percent_initially_labelled*100,mean_wt_auc_zgl_Amherst, yerr=se_wt_auc_zgl_Amherst, fmt='--o', capthick=2, alpha=1, elinewidth=3, color='orange') grid[i].errorbar(percent_initially_labelled*100, mean_wt_auc_one, yerr=se_wt_auc_one, fmt='--o', capthick=2, alpha=1, elinewidth=3, color='red') grid[i].errorbar(percent_initially_labelled*100, mean_wt_auc_two, yerr=se_wt_auc_one, fmt='--o', capthick=2, alpha=1, elinewidth=3, color='maroon') grid[i].errorbar(percent_initially_labelled*100, mean_wt_auc_decoupling_Amherst_01, yerr=se_wt_auc_decoupling_Amherst_01, fmt='--o', capthick=2, alpha=1, elinewidth=3, color='black') grid[i].set_ylim(0.3,1.1) grid[i].set_xlim(0,101) grid[i].annotate('1-hop MV', 
xy=(3, 0.80), color='red', alpha=1, size=12) grid[i].annotate('2-hop MV', xy=(3, 0.84), color='maroon', alpha=1, size=12) grid[i].annotate('hard smoothing', xy=(3, 0.88), color='orange', alpha=1, size=12) grid[i].annotate('decoupled smoothing', xy=(3, 0.92), color='black', alpha=1, size=12) grid[i].set_ylim(0.4,1.01) grid[i].set_xlim(0,100) grid[i].spines['right'].set_visible(False) grid[i].spines['top'].set_visible(False) grid[i].tick_params(axis='both', which='major', labelsize=13) grid[i].tick_params(axis='both', which='minor', labelsize=13) grid[i].set_xlabel('Percent of Nodes Initially Labeled').set_fontsize(15) grid[i].set_ylabel('AUC').set_fontsize(15) grid[0].set_xticks([0,25, 50, 75, 100]) grid[0].set_yticks([0.4,0.6,0.8,1]) grid[0].minorticks_on() grid[0].tick_params('both', length=4, width=1, which='major', left=1, bottom=1, top=0, right=0) # -
code/decoupled_smoothing_regularization/decoupled_smoothing_VS_others-Amherst.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.9 64-bit # language: python # name: python3 # --- 90 % 0.5 test = 5 test /= 5 test # + class Test(): x = 99 def test_2(test): test.x = 5 t = Test() print(t.x) test_2(t) t.x # -
testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''azureml_mnist'': conda)'
#     name: python388jvsc74a57bd00c6d11fbad74d6301b047179d3548bf1ec22794fad1007561535a8c5333c5ccf
# ---

# +
# %matplotlib inline
import os  # FIX: `os` is used throughout this notebook but was never imported
import numpy as np
import matplotlib.pyplot as plt

import azureml.core
from azureml.core import Workspace

# check core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
# -

# load workspace configuration from the config.json file in the current folder.
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep='\t')

# +
from azureml.core import Experiment

experiment_name = 'mnist-automl'
exp = Experiment(workspace=ws, name=experiment_name)
# -

# # Download MNIST dataset

# +
from azureml.core import Dataset
from azureml.opendatasets import MNIST

data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)

mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)

mnist_file_dataset = mnist_file_dataset.register(workspace=ws,
                                                 name='mnist_opendataset',
                                                 description='training and test dataset',
                                                 create_new_version=True)
# -

# # Display some sample images

# +
import gzip
import numpy as np
import struct
import glob
import pandas as pd


def load_data(filename, label=False):
    """Parse a gzipped IDX (MNIST) file into a numpy uint8 array.

    filename -- path to a *.gz IDX file
    label    -- False: images file, returns (n_items, n_rows*n_cols);
                True: labels file, returns (n_items, 1)
    """
    with gzip.open(filename) as gz:
        struct.unpack('I', gz.read(4))           # magic number, discarded
        n_items = struct.unpack('>I', gz.read(4))  # big-endian item count
        if not label:
            n_rows = struct.unpack('>I', gz.read(4))[0]
            n_cols = struct.unpack('>I', gz.read(4))[0]
            res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype=np.uint8)
            res = res.reshape(n_items[0], n_rows * n_cols)
        else:
            res = np.frombuffer(gz.read(n_items[0]), dtype=np.uint8)
            res = res.reshape(n_items[0], 1)
    return res


# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps
# the model converge faster.
X_train = load_data(glob.glob(os.path.join(data_folder, "**/train-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
X_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_train = load_data(glob.glob(os.path.join(data_folder, "**/train-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
y_test = load_data(glob.glob(os.path.join(data_folder, "**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)

# +
project_folder = 'project_folder'
if not os.path.exists('project_folder'):
    os.makedirs('project_folder')
# -

X_train.shape, y_train.shape

# +
# Reduce size of data set to reduce the compute time
X_train = X_train[:100]
y_train = y_train[:100]
X_train.shape, y_train.shape

# +
X = pd.DataFrame(X_train)
y = pd.DataFrame(y_train)
label = 'digit'
X[label] = y
training_data = X
training_data.to_csv('data/digits.csv')

ds = ws.get_default_datastore()
ds.upload(src_dir='./data', target_path='digitsdata', overwrite=True, show_progress=True)
training_data = Dataset.Tabular.from_delimited_files(path=ds.path('digitsdata/digits.csv'))
# -

# # Configure AutoML experiment

# +
from azureml.train.automl import AutoMLConfig
import time
import logging

compute_target = 'low-prio-compute'

automl_settings = {
    "name": "AutoML_Demo_Experiment_{0}".format(time.time()),
    "experiment_timeout_minutes": 20,
    "enable_early_stopping": True,
    "iteration_timeout_minutes": 10,
    "n_cross_validations": 5,
    "primary_metric": 'AUC_weighted',
    "max_concurrent_iterations": 10,
}

automl_config = AutoMLConfig(task='classification',
                             debug_log='automl_errors.log',
                             path=project_folder,
                             compute_target=compute_target,
                             training_data=training_data,
                             label_column_name=label,
                             **automl_settings,
                             )
# -

# # Submit AutoML Experiment

from azureml.core.experiment import Experiment

experiment = Experiment(ws, 'mnist-automl')
remote_run = experiment.submit(automl_config, show_output=True)

# # Explore results

from azureml.widgets import RunDetails

RunDetails(remote_run).show()
azure_ml/auto_ml/mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Run once per session
# !pip install fastai -q

# # A walk with the internal API
#
# * What is a `PILImage`
# * How does it work?
# * What are these "blocks" and how do they relate?
#
# Today we will go over an example with `ImageBlock` and `PointBlock`

from fastai.vision.all import *

# We'll use a cat image
url = 'https://upload.wikimedia.org/wikipedia/commons/a/a3/June_odd-eyed-cat.jpg'
download_url(url, 'cat.jpg')


# What is `PILImage`? Let's look at the code
class PILImage(PILBase):
    pass


# Okay.. that does nothing. Where do I go from here? We inherit from
# `PILBase`, let's try that! (pasted from the fastai source for illustration)
class PILBase(Image.Image, metaclass=BypassNewMeta):
    _bypass_type = Image.Image
    _show_args = {'cmap': 'viridis'}
    _open_args = {'mode': 'RGB'}

    @classmethod
    def create(cls, fn: (Path, str, Tensor, ndarray, bytes), **kwargs) -> None:
        "Open an `Image` from path `fn`"
        if isinstance(fn, Tensor): fn = fn.numpy()
        if isinstance(fn, ndarray): return cls(Image.fromarray(fn))
        if isinstance(fn, bytes): fn = io.BytesIO(fn)
        return cls(load_image(fn, **merge(cls._open_args, kwargs)))

    def show(self, ctx=None, **kwargs):
        "Show image using `merge(self._show_args, kwargs)`"
        return show_image(self, ctx=ctx, **merge(self._show_args, kwargs))


# That looks better. What does all this mean?

# +
# Image.Image??
# -

# `Image.Image` means a `PIL` based image is inherited

# Any time we have a datatype we want to use, we need a `create` and a `show`
# function. `create` prepares the file for converting to a `tensor`, etc.
# `show` is our show method.
im = PILImage.create('cat.jpg')
im.show()

# So what have we learned? Each item type needs a `create` and a `show`
# method. How does this relate to `ImageBlock`?

# +
# ImageBlock??
# -

def ImageBlock(cls=PILImage):
    return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)


# Now we're getting somewhere. If we want to use the `DataBlock`, each
# item type plugs in through a `TransformBlock`
block = TransformBlock(type_tfms=PILImage.create, batch_tfms=IntToFloatTensor)

# How would this convert over to a non-image? Let's look at a simple version,
# points!

# # Points

# If we take a look at the `PointBlock` object, we see the following:
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)


# So let's break this down into two parts, the `TensorPoint` and the
# `PointScaler`

# ## TensorPoint

# The goal of the `TensorPoint` is to turn a list of points into a `tensor`
# we can work with, that is **it**. Nothing about transforms, just generating
# some form of a **raw** input. Let's build our own, then compare with the
# source code.
class myTensorPoint(TensorBase):
    @classmethod
    def create(cls, t):
        return cls(tensor(t).view(-1, 2).float())


# We also want to keep track of the image size, as we may need it when we
# transform our image, so let's pass it along too.
class myTensorPoint(TensorBase):
    @classmethod
    def create(cls, t, img_size=None) -> None:
        return cls(tensor(t).view(-1, 2).float(), img_size=img_size)


# Let's try it
im.shape

pnts = [[1000, 100], [200, 300]]
tps = myTensorPoint.create(pnts)
tps


# Awesome. Now we need a `show` method. We'll use a scatter plot as we are
# dealing with points.
class myTensorPoint(TensorBase):
    _show_args = dict(s=10, marker='.', c='r')

    @classmethod
    def create(cls, t, img_size=None) -> None:
        return cls(tensor(t).view(-1, 2).float(), img_size=img_size)

    def show(self, ctx=None, **kwargs):
        if 'figsize' in kwargs: del kwargs['figsize']
        x = self.view(-1, 2)
        ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
        return ctx


# Let's try this
tps = myTensorPoint.create(pnts)
tps.show()

# Hmmm. Why does this not work? We want to overlay it on our image! Let's
# try passing in an image context too.
ctx = im.show()
tps.show(ctx=ctx)

# Now we see them!

# There are a few other bits we want. First make `myTensorPoint.create` into
# a `Transform`, to allow for what's called `setups` -- more on this later.

# +
# Transform??
# -

myTensorPointCreate = Transform(myTensorPoint.create)

# Any time we deal with these points, we want a loss function of
# `MSELossFlat` by default (so `cnn_learner` knows which loss to use!)
myTensorPointCreate.loss_func = MSELossFlat()

# And now let's replace our original `myTensorPoint`'s `create` function
myTensorPoint.create = myTensorPointCreate

# How close were we to the source code?

# +
class TensorPoint(TensorBase):
    "Basic type for points in an image"
    _show_args = dict(s=10, marker='.', c='r')

    @classmethod
    def create(cls, t, img_size=None) -> None:
        "Convert an array or a list of points `t` to a `Tensor`"
        return cls(tensor(t).view(-1, 2).float(), img_size=img_size)

    def show(self, ctx=None, **kwargs):
        if 'figsize' in kwargs: del kwargs['figsize']
        x = self.view(-1, 2)
        ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
        return ctx


TensorPointCreate = Transform(TensorPoint.create)
TensorPointCreate.loss_func = MSELossFlat()
TensorPoint.create = TensorPointCreate
# -

# So now we have seen how to create an item type, and what is needed. Now how
# do the `transforms` fit in? For instance with keypoints, we need to scale
# the points whenever the image is scaled or cropped.

# # PointScaler
#
# What does the following code tell us about this?

PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)


# `PointScaler` is an **item transform**: it runs on each item individually,
# not on the batch -- which is what we need, since images are cropped during
# item transforms.

# Transforms have the following:
# * `order`   - when does it occur? lower value runs sooner
# * `setups`  - prepares state from the data, such as `data.c`
# * `encodes` - applied when transforming an item
# * `decodes` - applied when undoing the transform
# Let's walk through the start of `PointScaler`. We want it to scale our
# points, optionally treating input as (y, x) instead of (x, y).
class myPointScaler(Transform):
    order = 1  # want this to occur first!

    def __init__(self, do_scale=True, y_first=False):
        self.do_scale, self.y_first = do_scale, y_first


# Now let's grab some `setups`. We want the total number of point
# coordinates in our dataset; `.numel()` gives us that.
tps.numel()


class myPointScaler(Transform):
    order = 1  # want this to occur first!

    def __init__(self, do_scale=True, y_first=False):
        self.do_scale, self.y_first = do_scale, y_first

    def setups(self, dl):
        its = dl.do_item(0)
        for t in its:
            if isinstance(t, TensorPoint): self.c = t.numel()


# `encodes`/`decodes` dispatch on the type of `x`. First we need a helper to
# remember the current image size.
class myPointScaler(Transform):
    order = 1  # want this to occur first!

    def __init__(self, do_scale=True, y_first=False):
        self.do_scale, self.y_first = do_scale, y_first

    def setups(self, dl):
        its = dl.do_item(0)
        for t in its:
            if isinstance(t, TensorPoint): self.c = t.numel()

    def _grab_sz(self, x):
        self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
        return x  # FIX: must hand the image back, like the fastai source does


# Now an encodes/decodes that records the image size when an image passes
# through the pipeline.
class myPointScaler(Transform):
    order = 1  # want this to occur first!

    def __init__(self, do_scale=True, y_first=False):
        self.do_scale, self.y_first = do_scale, y_first

    def setups(self, dl):
        its = dl.do_item(0)
        for t in its:
            if isinstance(t, TensorPoint): self.c = t.numel()

    def _grab_sz(self, x):
        self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
        return x  # FIX: without this, encodes/decodes would return None

    def encodes(self, x: (PILBase, TensorImageBase)): return self._grab_sz(x)
    def decodes(self, x: (PILBase, TensorImageBase)): return self._grab_sz(x)


# Now a function that scales (or leaves alone) points to the [-1, 1] range
# relative to the image size.
def _myScale_pnts(y, sz, do_scale=True, y_first=False):
    if y_first: y = y.flip(1)
    res = y * 2 / tensor(sz).float() - 1 if do_scale else y
    # FIX: keyword was misspelled `img_sz`; `TensorPoint.create` and
    # `_myUnscale_pnts` both use `img_size`
    return TensorPoint(res, img_size=sz)


# Does this work? Let's try
tps

_myScale_pnts(tps, 224)

# What would a point at the edge of an image look like?
im.shape

pnts = [[0, 0], [2370, 0], [0, 1927], [2370, 1927]]
tps = TensorPoint.create(pnts)
tps

ax = im.show()
for tp in tps: tp.show(ctx=ax)

s_pnts = [_myScale_pnts(tp, 224) for tp in tps]
s_pnts

# Next question: does this hold for other images and image sizes?
url2 = 'https://geekologie.com/2019/08/28/crazy-maine-coon-cat.jpg'
download_url(url2, 'cat2.jpg')

im2 = PILImage.create('cat2.jpg')
im2.shape

pnts2 = [[0, 0], [640, 0], [0, 770], [640, 770]]
tps2 = TensorPoint.create(pnts2)

ax = im2.show()
for tp in tps2: tp.show(ctx=ax)

[_myScale_pnts(tp, 224) for tp in tps2]


# We can see that (0,0) always maps to (-1,-1).

# Now we need a way to undo this.
def _myUnscale_pnts(y, sz):
    return TensorPoint((y + 1) * tensor(sz).float() / 2, img_size=sz)


s_pnts

# We pass in what the transformed size is, and we get back our original points
[_myUnscale_pnts(tp, 224) for tp in s_pnts]


# And that's it! We transform our points based on a new image size, and then
# have it be cropped, rotated, etc. The fastai source for comparison:

# +
def _scale_pnts(y, sz, do_scale=True, y_first=False):
    if y_first: y = y.flip(1)
    res = y * 2 / tensor(sz).float() - 1 if do_scale else y
    return TensorPoint(res, img_size=sz)


def _unscale_pnts(y, sz):
    return TensorPoint((y + 1) * tensor(sz).float() / 2, img_size=sz)


class PointScaler(Transform):
    "Scale a tensor representing points"
    order = 1

    def __init__(self, do_scale=True, y_first=False):
        self.do_scale, self.y_first = do_scale, y_first

    def _grab_sz(self, x):
        self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
        return x

    def _get_sz(self, x):
        sz = x.get_meta('img_size')
        assert sz is not None or self.sz is not None, "Size could not be inferred, pass it in the init of your TensorPoint with `img_size=...`"
        return self.sz if sz is None else sz

    def setups(self, dl):
        its = dl.do_item(0)
        for t in its:
            if isinstance(t, TensorPoint): self.c = t.numel()

    def encodes(self, x: (PILBase, TensorImageBase)): return self._grab_sz(x)
    def decodes(self, x: (PILBase, TensorImageBase)): return self._grab_sz(x)

    def encodes(self, x: TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
    def decodes(self, x: TensorPoint): return _unscale_pnts(x.view(-1, 2), self._get_sz(x))
# -

# Now how do we make a `Block` with our new bits?
myPointBlock = TransformBlock(type_tfms=myTensorPoint.create, item_tfms=myPointScaler)


# From there, if we want to generate a transform, we `@patch` it. For
# example, `flip_lr`:
def _neg_axis(x, axis):
    x[..., axis] = -x[..., axis]
    return x


@patch
def flip_lr(x: TensorPoint): return TensorPoint(_neg_axis(x.clone(), 0))
Computer Vision/03_Internal_API_Walkthrough.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): `neo4j.v1` is the pre-1.7 driver module path; newer drivers
# use `from neo4j import GraphDatabase` -- confirm the installed version.
from neo4j.v1 import GraphDatabase


class HelloWorldExample(object):
    """Minimal example of the official neo4j driver: open a session and run
    a write transaction that creates a Greeting node."""

    def __init__(self, uri, user, password):
        self._driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        self._driver.close()

    def print_greeting(self, message):
        # write_transaction retries the unit of work on transient errors
        with self._driver.session() as session:
            greeting = session.write_transaction(self._create_and_return_greeting, message)
            print(greeting)

    @staticmethod
    def _create_and_return_greeting(tx, message):
        result = tx.run("CREATE (a:Greeting) "
                        "SET a.message = $message "
                        "RETURN a.message + ', from node ' + id(a)",
                        message=message)
        return result.single()[0]


# +
from py2neo import Graph

# set up authentication parameters
g = Graph("bolt://10.0.0.58:7687", user="neo4j", password="<PASSWORD>")

# return all nodes
c = g.run("MATCH (n) RETURN n")
c.data()

# put it in a dataframe
from pandas import DataFrame

c = g.run("MATCH (n) RETURN n")
pd = DataFrame(c.data())
print(pd)
# -

# NOTE(review): these names were removed from modern py2neo releases, and
# `Graph.node(...)` is deprecated -- verify against the pinned py2neo version.
from py2neo import neo4j, ogm, node, rel

g.node(1)
Neo4j Examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # English Wikipedia Page Views # **The purpose of this notebook is to construct, analyze, and publish a dataset of monthly traffic on English Wikipedia from January 1 2008 through September 30 2018. The plot created at the end is a reproduction of the English Wikipedia Page Views graph, originally created by <NAME>. ** # + # Import libraries import json import requests import pandas as pd import numpy as np from functools import reduce import matplotlib.pyplot as plt # %matplotlib inline # To suppress warnings pd.options.mode.chained_assignment = None # - # ### Get the Wikipedia data # The data is sourced from two APIs: # 1) **The Legacy Pagecounts API** provides access to desktop and mobile traffic data from December 2007 through July 2016. This includes user as well as web crawler traffic. # See: https://wikimedia.org/api/rest_v1/#!/Legacy_data/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end # # 2) **The Pageviews API** provides access to desktop, mobile web, and mobile app traffic data from July 2015 through last month. This data will be filtered for user traffic using the agent=user filter as we want to focus on only organic traffic. 
# See: https://wikimedia.org/api/rest_v1/#!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end # #### Specify the endpoints # Specify the endpoints of the two APIs endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}' endpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}' # #### Specify the parameters # + # Legacy Pagecounts API # parameters for getting aggregated legacy view data for desktop desktop_params_legacy = {"project" : "en.wikipedia.org", "access-site" : "desktop-site", "granularity" : "monthly", "start" : "2001010100", # for end use 1st day of month following final month of data "end" : "2018100100" } # parameters for getting aggregated legacy view data for mobile mobile_params_legacy = {"project" : "en.wikipedia.org", "access-site" : "mobile-site", "granularity" : "monthly", "start" : "2001010100", # for end use 1st day of month following final month of data "end" : "2018100100" } # Current Pageviews API # parameters for getting aggregated current standard pageview data for desktop desktop_params_pageviews = {"project" : "en.wikipedia.org", "access" : "desktop", "agent" : "user", "granularity" : "monthly", "start" : "2001010100", # for end use 1st day of month following final month of data "end" : '2018101000' } # parameters for getting aggregated current standard pageview data for mobile-app mobile_app_params_pageviews = {"project" : "en.wikipedia.org", "access" : "mobile-app", "agent" : "user", "granularity" : "monthly", "start" : "2001010100", # for end use 1st day of month following final month of data "end" : '2018101000' } # parameters for getting aggregated current standard pageview data for mobile-web mobile_web_params_pageviews = {"project" : "en.wikipedia.org", "access" : "mobile-web", "agent" : "user", "granularity" : "monthly", 
"start" : "2001010100", # for end use 1st day of month following final month of data "end" : '2018101000' } # Customize these with your own information headers = { 'User-Agent': 'https://github.com/priyankam22', 'From': '<EMAIL>' } # - # #### Make calls to the API to get data # A function to make requests to the API for data def api_call(endpoint, parameters, headers): ''' This function makes a call to the API endpoint and returns a Python dictionary. Inputs: endpoint - the endpoint of the API to be called parameters - parameters that can be passed to the API headers - some header information which specifies identity of the caller Outputs: reponse - python dictionary of data returned by API ''' call = requests.get(endpoint.format(**parameters), headers=headers) response = call.json() return response # + # Get the legacy data from the Pagecounts API desktop_monthly_legacy = api_call(endpoint_legacy, desktop_params_legacy, headers) mobile_monthly_legacy = api_call(endpoint_legacy, mobile_params_legacy, headers) # Get current data from the Pageviews API desktop_monthly_pageviews = api_call(endpoint_pageviews, desktop_params_pageviews, headers) mobile_app_monthly_pageviews = api_call(endpoint_pageviews, mobile_app_params_pageviews, headers) mobile_web_monthly_pageviews = api_call(endpoint_pageviews, mobile_web_params_pageviews, headers) # - # The below two steps are for reproducibility and can be skipped if you are pulling the data from the APIs. # Save the data in json files on disk, This step saves intermediate results which can be used for reproducibility in future. 
with open('../data/pagecounts_desktop-site_200712-201809.json','w') as outfile: json.dump(desktop_monthly_legacy, outfile) with open('../data/pagecounts_mobile-site_200712-201809.json','w') as outfile: json.dump(mobile_monthly_legacy, outfile) with open('../data/pageviews_desktop_200712-201809.json','w') as outfile: json.dump(desktop_monthly_pageviews, outfile) with open('../data/pageviews_mobile-app_200712-201809.json','w') as outfile: json.dump(mobile_app_monthly_pageviews, outfile) with open('../data/pageviews_mobile-web_200712-201809.json','w') as outfile: json.dump(mobile_web_monthly_pageviews, outfile) # Load the data from json files if these files are available. with open('../data/pagecounts_desktop-site_200712-201809.json') as infile: desktop_monthly_legacy = json.load(infile) with open('../data/pagecounts_mobile-site_200712-201809.json') as infile: mobile_monthly_legacy = json.load(infile) with open('../data/pageviews_desktop_200712-201809.json') as infile: desktop_monthly_pageviews = json.load(infile) with open('../data/pageviews_mobile-app_200712-201809.json') as infile: mobile_app_monthly_pageviews = json.load(infile) with open('../data/pagecounts_mobile-web_200712-201809.json') as infile: mobile_web_monthly_pageviews = json.load(infile) # ### Data Processing # + # Load the dict data into Pandas dataframes for easy manipulation desktop_monthly_pagecounts_df = pd.DataFrame(desktop_monthly_legacy['items']) mobile_monthly_pagecounts_df = pd.DataFrame(mobile_monthly_legacy['items']) desktop_monthly_pageviews_df = pd.DataFrame(desktop_monthly_pageviews['items']) mobile_app_monthly_pageviews_df = pd.DataFrame(mobile_app_monthly_pageviews['items']) mobile_web_monthly_pageviews_df = pd.DataFrame(mobile_web_monthly_pageviews['items']) # - # Add the mobile app and mobile web data for Pageviews as we are interested in the total mobile views mobile_monthly_pageviews_df = mobile_app_monthly_pageviews_df.merge(mobile_web_monthly_pageviews_df, on='timestamp') 
# After the app/web merge, pandas suffixes the duplicate 'views' columns as
# views_x (app) and views_y (web); their sum is total mobile pageviews.
mobile_monthly_pageviews_df['pageview_mobile_views'] = mobile_monthly_pageviews_df['views_x'] + mobile_monthly_pageviews_df['views_y']
mobile_monthly_pageviews_df[['timestamp','pageview_mobile_views']][0:10]

# #### Merge the 4 dataframes on timestamp

# +
# Renaming the views columns for merging
desktop_monthly_pageviews_df.rename(columns={'views': 'pageview_desktop_views'}, inplace=True)
desktop_monthly_pagecounts_df.rename(columns={'count': 'pagecount_desktop_views'}, inplace=True)
mobile_monthly_pagecounts_df.rename(columns={'count': 'pagecount_mobile_views'}, inplace=True)

# Select the relevant columns for merging
dfs = [
    desktop_monthly_pagecounts_df[['timestamp','pagecount_desktop_views']],
    mobile_monthly_pagecounts_df[['timestamp','pagecount_mobile_views']],
    desktop_monthly_pageviews_df[['timestamp','pageview_desktop_views']],
    mobile_monthly_pageviews_df[['timestamp','pageview_mobile_views']]
]

# Merge the dataframes on timestamp.
# Outer join keeps months covered by only one API (legacy vs. current).
# NOTE(review): assumes `reduce` (functools) is imported earlier in the notebook.
dataset = reduce(lambda left, right: pd.merge(left, right, on='timestamp', how='outer'), dfs)

# +
# Replace nans with 0 (months missing from one source after the outer join)
dataset.fillna(0, inplace=True)

# Create the total views for each API
dataset['pagecount_all_views'] = dataset['pagecount_desktop_views'] + dataset['pagecount_mobile_views']
dataset['pageview_all_views'] = dataset['pageview_desktop_views'] + dataset['pageview_mobile_views']

# Extract the year and month from timestamp (format 'YYYYMMDDHH')
dataset['year'] = dataset['timestamp'].apply(lambda x: x[:4])
dataset['month'] = dataset['timestamp'].apply(lambda x: x[4:6])
dataset.drop(['timestamp'], axis=1, inplace=True)

# Rearranging the columns
dataset = dataset[['year','month','pagecount_all_views','pagecount_desktop_views','pagecount_mobile_views','pageview_all_views','pageview_desktop_views','pageview_mobile_views']]

# Write the resulting dataframe to csv
dataset.to_csv('../data/en-wikipedia_traffic_200712-201809.csv')
# -

# ### Plot the Pageviews as a time series

# +
# Divide all the view counts by 1000000 for better readability of the plots
# Scale every view-count column to millions so the y-axis stays readable.
dataset['pagecount_desktop_views'] = dataset['pagecount_desktop_views'].apply(lambda x: x/10**6)
dataset['pagecount_mobile_views'] = dataset['pagecount_mobile_views'].apply(lambda x: x/10**6)
dataset['pageview_desktop_views'] = dataset['pageview_desktop_views'].apply(lambda x: x/10**6)
dataset['pageview_mobile_views'] = dataset['pageview_mobile_views'].apply(lambda x: x/10**6)
dataset['pagecount_all_views'] = dataset['pagecount_all_views'].apply(lambda x: x/10**6)
dataset['pageview_all_views'] = dataset['pageview_all_views'].apply(lambda x: x/10**6)

# Combine the year and month for xaxis (e.g. '2016' + '08' -> '201608')
dataset['timestamp'] = dataset['year']+dataset['month']

# +
# Set the Aug 2016 values for legacy columns to zero as the data collection looks incomplete in that month
# The legacy system seems to be discontinued after 201608.
mask = (dataset['year'] == '2016') & (dataset['month'] == '08')
dataset.loc[mask, 'pagecount_all_views'] = np.nan
dataset.loc[mask, 'pagecount_desktop_views'] = np.nan
dataset.loc[mask, 'pagecount_mobile_views'] = np.nan

# Replace 0 with nan so that it is not plotted on the graph
dataset.replace(0,np.nan, inplace=True)

# +
# Set default fintsize for the plot
plt.rcParams.update({'font.size': 15})

# Add lines for all the different columns in the dataset.
# Dashed lines = legacy Pagecounts API; solid lines = current Pageviews API.
fig, ax = plt.subplots(figsize=(18,6))
plt.plot(dataset['timestamp'], dataset['pagecount_desktop_views'],'g--', label='Pagecount Desktop')
plt.plot(dataset['timestamp'], dataset['pagecount_mobile_views'],'b--', label='Pagecount Mobile')
plt.plot(dataset['timestamp'], dataset['pagecount_all_views'],'k--', label='Pagecount Total')
plt.plot(dataset['timestamp'], dataset['pageview_desktop_views'],'g', label='Pageview Desktop')
plt.plot(dataset['timestamp'], dataset['pageview_mobile_views'],'b', label='Pageview Mobile')
plt.plot(dataset['timestamp'], dataset['pageview_all_views'],'k', label='Pageview Total')

# Limit the xaxis
plt.xlim('200801','201812')

# Limit the xaxis labels to first month of each year and
# reformat to year
# plt.draw() forces tick computation so get_xticklabels() returns real text.
plt.draw()
labels = [l.get_text()[:4] if l.get_text()[-2:] == '01' else '' for l in ax.get_xticklabels() ]
ax.set_xticklabels(labels)

plt.legend(loc='upper left')
plt.tight_layout()
plt.title("Page Views on English Wikipedia", fontsize=20)
plt.xlabel("Year", fontsize=16)
plt.ylabel("Page views in millions", fontsize=16)
plt.tick_params(labelsize=15)
plt.grid()
# Footnote explaining the two data definitions (legacy vs. crawler-filtered).
plt.annotate('**Pagecount is the legacy definition. Since May 2015, a new Pageview defintion was used to eliminate all crawler traffic. ',
             (0,0),(0, -70),
             xycoords='axes fraction',
             textcoords='offset points',color='gray', size=16 )
plt.show()
# -

# Save file to plot folder
fig.savefig('English Wikipedia PageViews Graph.jpeg', format='jpeg')
A1- Data Curation/English_Wikipedia_Page_Views_2007-2018.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 (Conda)
#     language: python
#     name: python3.6
# ---

# +
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import h5py

from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.resnet50 import ResNet50

import sys
sys.path.append("../src")
from data_loader import *
# -

event="0210"
size ="128"

#a = np.load("../data/real/images/run_"+event+"_label_True_size_"+size+".npy")
x_set = np.load("../data/latent/clf_latent/vgg_data_repr.npy")
y_set = np.load("../data/latent/clf_latent/targets.npy")
_, x, y = load_real_event("128")

print(x_set[0].shape)

# +
# Pick one stored representation set and reduce it to 50 PCA components
# before t-SNE (standard practice to denoise the embedding input).
which = 1
data = x_set[which]
targets = y_set[which]
true_class = targets.argmax(1)
pca_repr = PCA(50).fit_transform(data)
# -

dim_red = TSNE(2, perplexity=15, init="pca")
data_to_plot = dim_red.fit_transform(pca_repr)
#print(dim_red.explained_variance_ratio_)

# +
# Scatter the 2-D t-SNE embedding, one colour per true class.
fig, ax = plt.subplots()
classes = ["Proton", "Carbon", "Other"]
for c in np.unique(true_class):
    w = true_class == c
    ax.scatter(data_to_plot[w][:,0], data_to_plot[w][:,1], alpha=0.4, label=classes[c])
plt.legend()
# -

def logreg(input_shape, n_classes=2, lmd=0.2):
    """Build a logistic-regression-style classifier: a single l2-regularised
    dense layer, batch-norm, then a sigmoid activation."""
    # BUG FIX: the original referenced the bare name `keras`, which is never
    # imported in this notebook (only `tensorflow as tf` is), so calling this
    # function raised NameError. Use the tf.keras namespace instead.
    # NOTE(review): `lmd` is accepted but unused — kept for interface
    # compatibility; the regularisation strength is Keras' "l2" default.
    model = tf.keras.models.Sequential()
    model.add(
        tf.keras.layers.Dense(
            n_classes,
            input_shape=input_shape,
            kernel_regularizer="l2",
        )
    )
    model.add(
        tf.keras.layers.BatchNormalization()
    )
    model.add(
        tf.keras.layers.Activation("sigmoid")
    )
    return model


# +
def vgg_model(input_dim):
    """Return a feature extractor: VGG16 truncated after layer `which_o`,
    with the output flattened to a vector."""
    input_layer = Input(shape=input_dim)
    vgg = VGG16(include_top=False, input_tensor=input_layer)
    which_o = 3
    o = Flatten()(vgg.layers[which_o].output)
    return Model(inputs=input_layer, outputs=o)

def resnet_model(input_dim):
    """Return a feature extractor: full headless ResNet50, output flattened."""
    input_layer = Input(shape=input_dim)
    res_net = ResNet50(include_top=False, input_tensor=input_layer)
    o = Flatten()(res_net.output)
    return Model(inputs=input_layer, outputs=o)
# -

# Grayscale images are stacked 3x on the channel axis to fit VGG's 3-channel input.
model = vgg_model((128, 128, 3))
model_repr = model.predict(np.concatenate([x, x, x], -1))
model_repr.shape

pca = PCA(1500, svd_solver="randomized")
pca.fit(model_repr)

fig, ax = plt.subplots()
ax.plot(pca.explained_variance_ratio_)

pca_repr = pca.transform(model_repr)
#pca_vgg_test = pca.transform(vgg_model.predict(original_test))

# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

xtr, xte, ytr, yte = train_test_split(pca_repr, y)
clf_model = RandomForestClassifier(max_features=0.1, class_weight="balanced")
clf_model.fit(xtr, ytr)
print(clf_model.score(xte, yte))
# -

important = clf_model.feature_importances_ > 1e-2
plt.hist(clf_model.feature_importances_[important])

# Two most important PCA components according to the random forest.
most_import = np.argsort(-clf_model.feature_importances_)[:2]
print(most_import)

pca_2dim = pca_repr[:, most_import]

# +
y_m = y.argmax(1)
classes = np.unique(y_m)
for c in classes:
    which = y_m == c
    # NOTE(review): `classes[c]` indexes by class *value*; it only matches the
    # intended label when class values are 0..k-1. Confirm against y_set.
    plt.scatter(pca_2dim[which,0], pca_2dim[which,1], label=classes[c])
plt.legend()
# -

# +
from sklearn.cluster import MiniBatchKMeans

cluster_model = MiniBatchKMeans(
    n_clusters=3,
    batch_size=150,
    n_init=100,
)

# NOTE(review): `pca_vgg_train` / `pca_vgg_test` are never defined in this
# notebook (the cell producing them is commented out above) — this cell
# raises NameError as-is. Presumably train/test splits of `pca_repr`; verify.
cluster_model.fit(pca_vgg_train)
train_pred = cluster_model.predict(pca_vgg_train)
test_pred = cluster_model.predict(pca_vgg_test)

# +
def latent_distance(x, y, weight_func=lambda x: x):
    """
    x and y should be T by L matrices
    this function measures euclidian distance along T
    and reduces to a float along L
    """
    sub = x - y
    sub = weight_func(sub)
    tmp = np.power(sub, 2)
    tmp = np.sum(tmp, axis=1)
    tmp = np.sqrt(tmp,)
    return np.sum(tmp)

def euclidian(x, y):
    """Plain Euclidean distance between two 1-D vectors."""
    return(np.sqrt(np.sum(np.power(x-y, 2))))

n_events = pca_vgg_train.shape[0]
train_dist_matrix = np.zeros((n_events, n_events))
n_test = pca_vgg_test.shape[0]
test_dist_matrix = np.zeros((n_test, n_test))

T = np.expand_dims(np.arange(pca_vgg_train.shape[0],), -1)
linear_weight = lambda x: x/(1 + T )

# Pairwise distance matrices for the precomputed-metric clustering below.
for i in range(n_events):
    for j in range(n_events):
        #dist_matrix[i, j] = latent_distance(original_latent[:, i, :], original_latent[:, j, :], weight_func=linear_weight)
        train_dist_matrix[i, j] = euclidian(pca_vgg_train[i, :], pca_vgg_train[j, :])

for i in range(n_test):
    for j in range(n_test):
        #dist_matrix[i, j] = latent_distance(original_latent[:, i, :], original_latent[:, j, :], weight_func=linear_weight)
        test_dist_matrix[i, j] = euclidian(pca_vgg_test[i, :], pca_vgg_test[j, :])
# -

# +
# %matplotlib notebook
# Sorted distance curve: used to eyeball a sensible DBSCAN `eps`.
flat_dist = train_dist_matrix.flatten()
flat_dist.sort()
plt.plot(flat_dist, "ko", alpha=0.4)
# -

# +
from sklearn.cluster import DBSCAN

cluster_model = DBSCAN(
    eps=200,
    metric="precomputed",
    min_samples=8
)
train_pred = cluster_model.fit_predict(train_dist_matrix)
#test_pred = cluster_model.transform(test_dist_matrix)
# -

def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    #classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # Row-normalise so each row sums to 1 (per-true-class rates).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax

# +
from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score, confusion_matrix

# NOTE(review): `train_targets` / `test_targets` are never defined in the
# visible notebook — presumably integer labels matching train/test splits.
a = plot_confusion_matrix(train_targets, train_pred, ["proton", "carbon", "junk"])
a.set_title("Confusion matrix for Train")
print("Scores on Train: ")
print("ARI : ", adjusted_rand_score(train_targets, train_pred))
print("NMI : ", normalized_mutual_info_score(train_targets, train_pred))
# -

# +
a = plot_confusion_matrix(test_targets, test_pred, ["proton", "carbon", "junk"])
a.set_title("Confusion matrix for test")
# BUG FIX: this cell reports *test* scores but the label said "Train".
print("Scores on Test: ")
print("ARI : ", adjusted_rand_score(test_targets, test_pred))
print("NMI : ", normalized_mutual_info_score(test_targets, test_pred))
# -

# +
from sklearn.manifold import TSNE
projection = TSNE(2, perplexity=34, learning_rate=10).fit_transform(pca_vgg_train)
# -

# +
proton = projection[train_targets==0]
carbon = projection[train_targets==1]
junk = projection[train_targets==2]

fig, ax = plt.subplots()
ax.scatter(proton[:,0], proton[:,1], c="r", alpha=0.6)
ax.scatter(carbon[:,0], carbon[:,1], c="g", alpha=0.2)
ax.scatter(junk[:,0], junk[:,1], c="b", alpha=0.2)
plt.show()
# -

fig, ax = plt.subplots()
ax.scatter(projection[:,0], projection[:,1], alpha=0.2)
plt.show()
notebooks/vgg_clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv-datascience # language: python # name: venv-datascience # --- import numpy as np # # 1.Data Types & Attributes # ## A) NumPy Array # main data type is ndarray a1 = np.array([1,2,3]) a1 type(a1) a1.shape a2 = np.array([[1, 2.1, 3.2], [4.1, 5.2, 6.3]]) a2 a2.shape a3 = np.array([ [[1,2,3],[4,5,6],[1.1,1.2,1.3]], [[7,8,9],[10,11,12],[2.2,2.3,2.4]] ]) a3 a3.shape a1.shape a2.shape # checking number of dimensions a1.ndim, a2.ndim, a3.ndim # checking data type a1.dtype, a2.dtype, a3.dtype # checking size a1.size, a2.size, a3.size # checking type type(a1), type(a2), type(a3) # ## creating data frame from ndarray import pandas as pd df = pd.DataFrame(a2) df arr = np.array([[1,2,3],[4,5,6]], dtype=np.float32) print(arr) print(repr(arr)) # ## np.array upcasting # np.array upcasting arr = np.array([1,2.0,3]) print(repr(arr)) print(arr.dtype) # ## B) Copying # + a = np.array([0, 1]) b = np.array([9, 8]) c = a print('Array a: {}'.format(repr(a))) c[0] = 5 print('Array a: {}'.format(repr(a))) d = b.copy() d[0] = 6 print('Array b: {}'.format(repr(b))) # - # ## C) Casting arr = np.array([0, 1, 2]) print(arr.dtype) arr = arr.astype(np.float32) print(arr.dtype) # ## D) NaN # + #NaN as place holder #NaN cannot take integer arr = np.array([np.nan, 1, 2]) print(repr(arr)) arr = np.array([np.nan, 'abc']) print(repr(arr)) # Will result in a ValueError np.array([np.nan, 1, 2], dtype=np.int32) # - # ## E) Infinity # + # infinity can be postive or negative # infinity cannot take integer type print(np.inf > 1000000) arr = np.array([np.inf, 5]) print(repr(arr)) arr = np.array([-np.inf, 1]) print(repr(arr)) # Will result in an OverflowError np.array([np.inf, 3], dtype=np.int32) # - # ## Exercise # + # Set arr equal to np.array applied to the specified list. 
arr = np.array([np.nan, 2,3,4,5]) # - type(arr) print(repr(arr)) # + # #copy and change element of first array arr2 = arr.copy() arr2[0] = 10 print(arr2) print(arr) # + # type cast the array element float_arr = np.array([1, 5.4, 3]) float_arr2 = arr2.astype(np.float32) print(repr(float_arr)) print(repr(float_arr2)) # + # multi dimensional array matrix = np.array([[1,2,3],[4,5,6]], dtype=np.float32) print(repr(matrix)) print(matrix) # - # # 2. Creating NumPy Arrays # # - np.array() # - np.ones() # - np.zeros() # - np.random.rand(5, 3) # - np.random.randint(10, size=5) # - np.random.seed() - pseudo random numbers # - Searching the documentation example (finding np.unique() and using it) sample_array = np.array([1,2,3]) sample_array ones = np.ones((2,3), dtype=float) ones # + # usually ones and zeros are used for placeholder # - zeros = np.zeros((3,2,3)) zeros range_array = np.arange(0, 20, 2) range_array # random random_array = np.random.randint(0, 20, size=(2,3)) random_array random_array.size random_array.shape random_array2 = np.random.random(size=(2,3,5)) random_array2 random_array2.shape random_array2.size random_array3 = np.random.rand(2,3) random_array3 # + #random seed np.random.seed(0) #Pseudo - random numbers random_array4 = np.random.randint(10, size=(4,3)) random_array4 # - np.random.seed(7) random_array5 = np.random.random(size=(4,3)) random_array5 # # 3. Viewing arrays and matrics #finding unique numbers in array np.unique(random_array4) a1 a2 a3 a1[0] a2[0] a2[0][1] a3.shape a3[0] a3 a3[1][1][1] a3.shape a3[:2, :2, :2] a4 = np.random.randint(10, size=(2,3,4,5)) a4.shape, a4.ndim a4 # get the first 5 numbers of inner most arrays a4[1:, 2: , :1, :] # if I want 1 and 4 a4[:, 1:2, 1:2, 2:3] # # 4. Manipulation and Comparing arrays # ## Arithmetics a1 ones = np.ones(3) ones a1 + ones a1 - ones a1 * ones a1 a2 a1 * a2 a3 a2 * a3 # how can we make a2 and a3 compitbale to make calculations ? 
# Reshape the (2,3,3) array into (6,3) — same 18 elements, new shape.
a3_new = a3.reshape((6,3))
a3_new

a2

a1 / ones

a2 / a1

# Floor division or integer division
a2 // a1

a2

a2 ** 2

np.square(a2)

np.add(a1, ones)

a1

a1 % 2

np.mod(a1, 2)

np.exp(a1)

np.log(a1)

# ## Aggregration

a1

a1.sum()

np.sum(a1)

sum(a1)

# Use Python's method `sum()` on python data types.
# Use NumPy's method `np.sum()` on numpy arrays

# create massive array (to compare Python sum vs NumPy sum speed)
massive_array = np.random.random(10000)
massive_array.size

massive_array[:10]

# %timeit sum(massive_array)
# %timeit massive_array.sum()

a2

a2.mean()

np.mean(a2)

np.max(a2)

a2.min()

a2.std()

a2.var()

# standard deviation is the square root of the variance
np.sqrt(a2.var())

# +
# std dev , variance
high_var_array = np.array([100, 200, 500, 1000, 5000])
low_var_array = np.array([2,4,6,8,10])

high_var_array.var(), low_var_array.var()
# -

high_var_array.std(), low_var_array.std()

# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.hist(high_var_array)
plt.show()
# -

plt.hist(low_var_array)
plt.show()

# # Reshape and Transpose

a2

a2.shape

a3

a3.shape

# NOTE(review): raises a broadcasting ValueError ((2,3) vs (2,3,3));
# the reshape below adds a trailing axis so broadcasting works.
a2 * a3

a2

a2.shape

#reshape a2 to (2,3,1); the length-1 axis broadcasts against a3's last axis
a2_reshape = a2.reshape(2,3,1)
a2_reshape

a2_reshape.shape

a2_reshape * a3

a2

# Transpose switches the axies
a2.T

a2.shape

a3.shape

a3

a3.T

a3.shape

# # Dot Product & Element Wise

# ## Dot Product

# +
np.random.seed(0)
mat1 = np.random.randint(10, size=(5,3))
mat2 = np.random.randint(10, size=(5,3))
# -

mat1

mat2

mat1.shape, mat2.shape

#Element wise multiplication
mat1 * mat2

# Dot Product
# NOTE(review): (5,3) @ (5,3) raises ValueError — inner dimensions must match;
# the reshape/transpose cells below fix this.
np.dot(mat1, mat2)

#using reshape
mat2_reshape = mat2.reshape((3,5))
mat2_reshape

mat1.dot(mat2_reshape)

# using transpose
mat1_T = mat1.T

mat1_T.shape

mat1_T.dot(mat2)

# ## Dot Product (Nut Butter Sales)

# +
import pandas as pd
np.random.seed(0)

# number of jars sold
sales_amt = np.random.randint(20, size=(5,3))
sales_amt
# -

# create weekly sales dataframe
weekly_sales_df= pd.DataFrame(sales_amt,
                              index=["Mon", "Tue", "Wed", "Thu", "Fri"],
                              columns = ["Almond Butter", "Peanut Butter", "Cashew Butter"])
weekly_sales_df

weekly_sales_df.shape

# butter prices (column vector, one price per product)
prices = np.array([[10], [8], [12]])
prices.shape

prices_df = pd.DataFrame(prices,
                         index= ["Almond Butter", "Peanut Butter", "Cashew Butter"],
                         columns = ["price"])
prices_df

prices_df.shape

#calculating total weekly sales: (5,3) jars-sold dot (3,1) prices -> (5,1) dollars
daily_sales_df = weekly_sales_df.dot(prices_df)
daily_sales_df

#combine weekly total sales to weekly sales df
weekly_sales_df["Total Sales $"] = daily_sales_df
weekly_sales_df

# # Comparison Operators

a1

a2

a1 < a2

a1 == a2

a1 == a1

a1 > a2

a1 >= a2

# # Sorting Arrays

random_array = np.random.randint(10, size=(3,5))
random_array

# axis=0 sorts down each column; axis=1 sorts across each row
np.sort(random_array, axis=0)

np.sort(random_array, axis=1)

# ### Sorting the indexes

np.argsort(random_array, axis=0)

a1

np.argsort(a1, axis=0)

# index of min value
np.argmin(a1)

# index of max value
np.argmax(a1)

random_array

np.argmax(random_array)

random_array[0,4]

np.argmax(random_array, axis=1)
#comparing along the columns
#9 is at the 4th index for first row, another 9 is at the 1st index for second row, 5 is at 1st index of third row

np.argmax(random_array, axis=0)
#comparing along the rows
#8 is at the 1st index of first column, 9 is at the 1st index of second column, 4 is at 1st index of third column
# 5 is at 0th index of fourth column, 9 is at 0th index of fifth column

# # Pratical Example: NumPy in Action !

# <img src="images/panda.png"/>

# +
#Turn image into numPy Array (imread returns an HxWxC float array for PNG)
from matplotlib.image import imread

panda = imread("images/panda.png")
panda
# -

type(panda)

panda.size, panda.shape, panda.ndim

# <img src="images/car-photo.png"/>

car = imread("images/car-photo.png")
type(car)

# <img src="images/dog-photo.png"/>

dog = imread("images/dog-photo.png")
type(dog)
Complete Machine Learning and Data Science - Zero to Mastery - AN/07.NumPy/numpy data analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# One-sample t-test: does the population mean of `ages` differ from 40?
ages=[10,30,20,30,20,35,35,67,87,45,34,16,55,17,16,65,19,10,43,23,67,30,34,72,81,35,67,87,45,34,16]

len(ages)

import numpy as np
np.mean(ages)

# Draw a random sample (with replacement) from the population.
# NOTE(review): unseeded — results vary between runs.
sample_size=10
sample_age=np.random.choice(ages,sample_size)

sample_age

from scipy.stats import ttest_1samp

# H0: sample mean == 40
ttest,p_value=ttest_1samp(sample_age,40)

print(p_value)

if p_value<0.05:
    print('we are rejecting our null hypo')
else:
    print('we are accepting null hypo')

# #### 2 sample t_test

import numpy as np
import scipy.stats as stats

# Two independent Poisson-distributed samples with different means.
classa_height=stats.poisson.rvs(mu=30,size=60)
classb_height=stats.poisson.rvs(mu=33,size=60)

classa_height

np.mean(classa_height)

np.mean(classb_height)

from scipy.stats import ttest_ind

# equal_var=False -> Welch's t-test (does not assume equal variances)
t_test,p_value=ttest_ind(a=classa_height,b=classb_height,equal_var=False)

p_value

if p_value<0.05:
    print('we are rejecting our null hypo')
else:
    print('we are accepting null hypo')

# #### Paired T_test

import pandas as pd
# BUG FIX: the original non-raw string contained invalid escape sequences
# (\S, \D), which is deprecated and emits a SyntaxWarning on modern Python.
# The raw-string prefix preserves the exact same path value.
df=pd.read_csv(r'F:\Statistics\Data/Blood_pressure.csv')

df

df[['bp_before','bp_after']].describe()

# ##### H0->u=0
# #### H1-> u=/0

from scipy.stats import ttest_rel

# Paired test: same subjects before vs. after treatment.
t,p_value=ttest_rel(df['bp_before'],df['bp_after'])

p_value

if p_value<0.05:
    print('we are rejecting our null hypo')
else:
    print('we are accepting null hypo')

# #### 156+-11.45
# #### 151.358333+-14.17
Stats module 4/Stats_Ttest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Exercise 2 - Simple Linear Regression # ===================== # # We want to know how to make our chocolate-bar customers happier. To do this, we need to know which chocolate bar _features_ predict customer happiness. For example, customers may be happier when chocolate bars are bigger, or when they contain more cocoa. # # We have data on customer happiness when eating chocolate bars with different features. Lets look at the relationship between happiness and bar size. # # Step 1 # -- # # First, lets have a look at our data. # # #### In the cell below replace the text `<printDataHere>` with `print(dataset.head())` and then press __Run__ in the toolbar above (or press __Shift+Enter__). # + import warnings warnings.filterwarnings("ignore") import pandas as pd import matplotlib.pyplot as graph import statsmodels.formula.api as smf from scipy import stats dataset = pd.read_csv('Data/chocolate data.txt', index_col=False, sep="\t",header=0) ### # REPLACE <PrintDataHere> WITH print(dataset.head()) ### dataset.head() ### # - dataset.describe().T # The data represents 100 different variations of chocolate bars and the measured customer happiness for each one. # # Step 2 # -- # # We want to know which chocolate bar features make customers happy. # # The example below shows a linear regression between __cocoa percentage__ and __happiness__. You can read through the comments to understand what is happening. # # #### __Run the code__ to to see the output visualized. 
# + # https://www.statsmodels.org/stable/example_formulas.html?highlight=statsmodels%20formula # https://patsy.readthedocs.io/en/latest/formulas.html#operators lm = smf.ols(formula = 'customer_happiness ~ cocoa_percent', data = dataset) # - lm = lm.fit() lm.params # + # y = m * x + b # m = slope, angle of the line # b = y intercept x=dataset['cocoa_percent'] b = lm.params[0] m = lm.params[1] y = m * x + b # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot graph.plot(x, y, label='model', color='red', linestyle='-') # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.scatter.html?highlight=scatter#matplotlib.pyplot.scatter graph.scatter(x, dataset['customer_happiness'], label='happiness') graph.ylabel('customer_happiness') graph.xlabel('cocoa_percent') graph.title('customer happiness by cocoa percentage') graph.legend() graph.show() # - #https://seaborn.pydata.org/introduction.html import seaborn as sns sns.set() g = sns.relplot(x="cocoa_percent", y="customer_happiness", size='sugar_percent', sizes=(20, 200), data=dataset, legend=False) # hue='sugar_percent' g.fig.set_size_inches(10,6) sns.lineplot(x=x, y=y, color='red') graph.show() # https://www.statisticshowto.datasciencecentral.com/lowess-smoothing/ sns.residplot(x="cocoa_percent", y="customer_happiness", data=dataset, lowess=True, color='r') graph.show() sns.regplot(x="cocoa_percent", y="customer_happiness", data=dataset); graph.show() # + # Run this cell! 
# DO NOT EDIT ANY OF THIS CODE # Define a function to perform a linear regression def PerformLinearRegression(formula): # This performs linear regression lm = smf.ols(formula = formula, data = dataset).fit() featureName=formula.split(" ")[-1] # get the data for the x parameter (our feature) train_X=dataset[featureName] # This makes and shows a graph intercept=lm.params[0] slope=lm.params[1] line = slope * train_X + intercept graph.plot(train_X, line, '-', c = 'red') graph.scatter(train_X, dataset.customer_happiness) graph.ylabel('customer_happiness') graph.xlabel(featureName) graph.show() # This performs the linear regression steps listed above # The text in red is the formula for our regression PerformLinearRegression('customer_happiness ~ cocoa_percent') # - # In the scatter plot above, each point represents an observation for a single chocolate bar. # # It seems that __more cocoa makes customers more happy__. We can tell, because as we increase the amount of cocoa (x axis) the amount of customer happiness (y axis) increases. # # Step 3 # ------ # # Let's look at some other features. # # #### Below, replace the text `<addFeatureHere>` with __`weight`__ to see if heavier chocolate bars make people happier. # # Also try the variables `sugar_percent` and `milk_percent` to see if these improve customers' experiences. fig, axs = graph.subplots(nrows=3, ncols=1, figsize=(10,20)) sns.regplot(x='weight', y='customer_happiness', data=dataset, ax=axs[0]) sns.regplot(x='sugar_percent', y='customer_happiness', data=dataset, ax=axs[1]) sns.regplot(x='milk_percent',y='customer_happiness', data=dataset, ax=axs[2]) graph.show() ### # CHANGE <addFeatureHere> TO weight IN THE LINE BELOW ### PerformLinearRegression('customer_happiness ~ weight') ### PerformLinearRegression('customer_happiness ~ sugar_percent') PerformLinearRegression('customer_happiness ~ milk_percent') # It looks like heavier chocolate bars make customers happier. 
# The amount of milk or sugar, however, doesn't seem to make customers happier.
#
# Conclusion
# ---
# You have run a simple linear regression. This told us that if we want to make a chocolate bar that will make customers happy, it should be large and contain a lot of cocoa.
#
# Well done! You can now go back to the course and click __'Next Step'__ to move on to using linear regression with multiple features.
02. Linear Regression - Python.ipynb