code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GMNS to AequilibraE example # # ## Inputs # Nodes as a .csv flat file in GMNS format # omx skim files with numbers of trips # omx skim file with distnace # # # ## Steps # 1. Read the GMNS nodes, place centroids in dataframe # # 2. Read the trips # # 4. Generate new dataframe of O-D pairs # # #!/usr/bin/env python # coding: utf-8 print('<NAME>: Python code to extract selected zone pairs from a person trip table \n') import os from os.path import join import numpy as np import pandas as pd import sqlite3 #import shutil # needed? import openmatrix as omx import math run_folder = 'C:/Users/Scott/Documents/Work/GMNS/ATL' highest_centroid_node_number = 6031 def sl_dist(lat1, lon1, lat2, lon2): from math import radians, cos, sqrt, pow mile_per_lat = 69.0 mile_per_lon = mile_per_lat * math.cos(math.radians((lat1+lat2)/2.0)) latdiff = mile_per_lat * (lat1-lat2) londiff = mile_per_lon * (lon1-lon2) return sqrt(pow(latdiff,2) + pow(londiff,2)) # + #print(sl_dist(40,-75,41,-76)) # - # ## Read the nodes, and set up the dictionary of centroids # The dictionary of centroids is used later in setting up the omx trip table #Read the nodes node_csvfile = os.path.join(run_folder, 'GMNS_node.csv') df_taz = pd.read_csv(node_csvfile) #data already has headers print(df_taz.head()) #debugging df_size = df_taz.shape[0] print(df_size) #print(df_node.loc[700,"node_type"]) # drop the non centroids index = df_taz[df_taz['node_type'] != 'centroid'].index df_taz.drop(index, inplace=True) print(df_taz.head()) #debugging df_size = df_taz.shape[0] print(df_size) # Set up the dictionary of centroids # Assumption: the node_type = 'centroid' for centroid nodes # The centroid nodes are the lowest numbered nodes, at the beginning of the list of nodes, # but node numbers need not be 
consecutive tazdictrow = {} for index in df_taz.index: if df_taz['node_type'][index]=='centroid': #DEBUG print(index, df_node['node_id'][index], df_node['node_type'][index]) tazdictrow[df_taz['node_id'][index]]=index taz_list = list(tazdictrow.keys()) matrix_size = len(tazdictrow) #Matches the number of nodes flagged as centroids print(matrix_size) #DEBUG highest_centroid_node_number = max(tazdictrow, key=tazdictrow.get) #for future use print(highest_centroid_node_number) #DEBUG # + # Read the trip table and skim dembf = omx.open_file(join(run_folder, '0_tntp_data' ,'demand.omx'),'r') input_demand = np.array(dembf['matrix']) print('sum of trips',np.sum(input_demand)) spbf = omx.open_file(join(run_folder, '2_skim_results','sp_skim.omx'),'r') print('SP BASE SKIM FILE Shape:',spbf.shape(),' Tables:',spbf.list_matrices(),' Mappings:',spbf.list_mappings()) spbt = spbf['free_flow_time'] spbd = spbf['distance'] sp_dist = np.array(spbd) outdebugfile = open(os.path.join(run_folder,'debug_demand2.txt'),"w") # - for i in range(100): #matrix_size) for j in range(matrix_size): if(input_demand[i][j]>0): lon1 = df_taz.loc[i,'x_coord'] lat1 = df_taz.loc[i,'y_coord'] lon2 = df_taz.loc[j,'x_coord'] lat2 = df_taz.loc[j,'y_coord'] sp_distance = sp_dist[i][j] sl_distance = sl_dist(lat1,lon1,lat2,lon2) print(i,j,lat1, lon1, lat2, lon2,sp_distance,sl_distance,file=outdebugfile) outdebugfile.close() dembf.close() spbf.close()
Small_Network_Examples/Lima/AequilibraE/Circuity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tools for Python # ## pip # #### Searching for a package pip search astronomy # #### Install the latest version of a package by specifying a package’s name pip install novas # #### Install a specific version of a package by giving the package name followed by == and the version number pip install requests==2.6.0 # #### Upgrade pip install --upgrade requests # #### Uninstall pip uninstall novas -y # #### Display information about a particular package pip show requests # #### Display all of the packages installed in the environment pip list # #### Display list of packages installed but in the 'pip install' format # pip freeze > requirements.txt # # cat requirements.txt # #### Installation of all the necessary packages with the requirements file pip install -r requirements.txt # ## Pyenv # ### Sets the global version of Python to be used in all shells pyenv global # ### Sets a local application-specific Python version pyenv local 3.7.0 # ### Install a specific version pyenv install --list pyenv install --3.8.0 pyenv versions
3.outils/resources/Tools for Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import os, sys # %run prelims import opc_python import numpy as np import matplotlib.pyplot as plt import pandas from opc_python.utils import loading, scoring from opc_python.gerkin import dream,fit1,fit2,params perceptual_headers, perceptual_obs_data = loading.load_perceptual_data('training') all_CIDs = sorted(loading.get_CIDs('training')+loading.get_CIDs('leaderboard')+loading.get_CIDs('testset')) #mdx = dream.get_molecular_data(['dragon','episuite','morgan','nspdk','gramian'],all_CIDs) mdx = dream.get_molecular_data(['dragon','episuite','morgan'],all_CIDs) # ### Create matrices X_all,good1,good2,means,stds,imputer = dream.make_X(mdx,['training','leaderboard']) Y_all_mask,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='mask') Y_all_imp,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='median') X_gs,good1,good2,means,stds,imputer = dream.make_X(mdx,['testset'],good1=good1,good2=good2,means=means,stds=stds,target_dilution='gold') Y_gs,imputer = dream.make_Y_obs(['testset'],target_dilution='gold',imputer='mask') # + # Load optimal parameters (obtained from extensive cross-validation). 
cols = range(42) def get_params(i): return {col:params.best[col][i] for col in cols} use_et = get_params(0) max_features = get_params(1) max_depth = get_params(2) min_samples_leaf = get_params(3) trans_weight = get_params(4) regularize = get_params(4) use_mask = get_params(5) for col in range(21): trans_weight[col] = trans_weight[col+21] # - """ from sklearn.cross_validation import ShuffleSplit n_obs = int(len(Y_all_mask['subject'][1][:,col])/2) n_splits = 3 shuffle_split = ShuffleSplit(n_obs,n_splits,test_size=0.2,random_state=0) rs = np.zeros((21,49,n_splits)) X = X_all Y_imp = Y_all_imp['subject'] Y_mask = Y_all_mask['subject'] for k,(train,test) in enumerate(shuffle_split): print(k, flush=True) train = np.concatenate((2*train,2*train+1)) test = np.concatenate((2*test,2*test+1)) Y_train = {i:Y_imp[i][train] for i in range(1,50)} Y_test = {i:Y_mask[i][test] for i in range(1,50)} rfcs_cv,_,_ = fit1.rfc_final(X[train],Y_train, max_features,min_samples_leaf,max_depth,use_et, regularize=regularize,n_estimators=5) Y_cv = loading.make_prediction_files(rfcs_cv,X[test],X[test], 'all',1,Y_test=None, write=False,regularize=regularize) for col in range(21): for subject in range(1,50): rs[col,subject-1,k] = np.ma.corrcoef(Y_cv['subject'][subject][:,col], Y_test[subject][:,col])[0,1] print(rs.mean(axis=1)) """; X_train = X_all Y_train = Y_all_imp['subject'] #rfcs,_,_ = fit1.rfc_final(X_train,Y_train, # max_features,min_samples_leaf,max_depth,use_et, # regularize=regularize,n_estimators=25) #import pickle #with open('../../data/rfcs_1.pickle','wb') as f: # pickle.dump(rfcs,f,protocol=3) with open('../../data/rfcs_1.pickle','rb') as f: rfcs = pickle.load(f,protocol=3) Y_pred = loading.make_prediction_files(rfcs,X_gs,X_gs, 'all',1,Y_test=None, write=False,regularize=[0.8]*21) y_gs = np.ma.dstack([Y_gs['subject'][i] for i in range(1,50)]) y_pred = np.ma.dstack([Y_pred['subject'][i] for i in range(1,50)]) rs_gs = np.ma.zeros((21,49)) coe_gs = np.zeros((21,49)) for col in 
range(21): for subject in range(49): o = y_gs[:,col,subject].copy() p = y_pred[:,col,subject].copy() rs_gs[col,subject] = np.ma.corrcoef(o,p)[0,1] x = (o - p).compressed() sse = (x**2).mean()# - x.mean()**2 coe_gs[col,subject] = np.sqrt(sse.mean())# / p.mean() #np.save('../../data/sc1_all_rs.npy',rs_gs.data) #np.save('../../data/sc1_all_coes.npy',coe_gs) # + from scipy.stats import linregress data = loading.load_data_matrix(gold_standard_only=True, only_replicates=True) #coe_gs = np.load('../../data/sc1_all_coes.npy') fig,axes = plt.subplots(3,7,figsize=(15,10)) rs = np.zeros(21) ps = np.zeros(21) for i,ax in enumerate(axes.flat): dil = 1 if i==0 else slice(None) o = data[:,:,i,dil,0] r = data[:,:,i,dil,1] if len(o.shape)==3: o = o.mean(axis=2) # Collapse the dilution dimension r = r.mean(axis=2) # Collapse the dilution dimension trt_var = np.zeros(49) for j in range(49): x = (r[j,:] - o[j,:]).compressed() trt_var[j] = x.var()#(x**2).mean() - x.mean()**2 #val = np.ma.array(y_pred[:,i,:].mean(axis=0)*np.tan(np.arccos(rs_gs[i,:])),mask=np.isnan(rs_gs[i,:])) val = rs_gs[i,:] x = np.sqrt(y_gs[:,i,:].var(axis=0))# - trt_var)# / y_pred[:,i,:].mean(axis=0) ax.scatter(x,val) xmax = max(x.max(),val.max())*1.1 #ax.plot([0,xmax],[0,xmax],'--',color='k') _,_,rs[i],ps[i],_ = linregress(x,val) ax.set_title('%s\nR=%.2f p=%.3f' % (descriptors[i].split('/')[0],rs[i],ps[i])) if i==7: ax.set_ylabel('Prediction Quality (R)') if i==17: ax.set_xlabel('StDev of Actual Ratings') plt.tight_layout() plt.figure() #print(rs) plt.hist(rs,bins=np.linspace(-1,1,21)) plt.xlabel('Correlation between Subject Response Variability and Prediction Quality') plt.ylabel('# of descriptors') #plt.tight_layout() # + from scipy.io import matlab yg = matlab.loadmat('../../data/sc2_yg.mat') yg = yg['a2'] resort = [sorted([str(i) for i in range(1,50)]).index(str(s)) for s in range(1,50)] yg = yg[:,resort] fig,axes = plt.subplots(2,1,figsize=(15,10)) for i,array in enumerate([rs_gs,yg]): ax = axes.flat[i] 
ax.pcolor(array,vmin=-0.3,vmax=0.8,cmap='RdBu') ax.set_xlabel('Subject #') ax.set_xticks(np.arange(0.5,49.6)) ax.set_xticklabels(range(1,50)) ax.set_xlim(0,49) ax.set_ylabel('Descriptor #') ax.set_yticks(np.arange(0.5,21.5)) ax.set_yticklabels(range(1,22)) ax.set_ylim(0,21)
opc_python/gerkin/challenge1_subject_correlations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd setups = ["fronts", "reimprove-fronts"] indicators = ["hv"] nobjs = [2, 3, 5, 10] benchmarks = ["DTLZ", "WFG"] problems = { "WFG": list(range(1,10)), "DTLZ": [2, 4, 5, 6, 7], } nvars = { 2: [30, 40, 50], 3: [30, 40, 50], 5: [30, 40, 50], 10: [31, 41, 51], } data = [ { "setup": setup, "indicator": indicator, "nobj": nobj, "problem": f"{bench}{problem}", "nvar": nvar, "value": pd.read_csv(f"{setup}/{bench}{problem}.{nobj}.{nvar}.{indicator}", header=None)[0][0] } for setup in setups for indicator in indicators[:1] for nobj in nobjs for bench in benchmarks for problem in problems[bench] for nvar in nvars[nobj] ] df = pd.DataFrame(data) df.head() df.to_csv("fronts.csv", index=False) import seaborn as sns sns.set() sns.catplot( data=df, x="setup", y="value", hue="setup", col="nobj", row="problem", sharey=False, aspect=1, kind="box", ) df_wide = df.pivot_table(index=["indicator",'nobj','problem','nvar'], columns=["setup"], values=['value']) df_wide.head() df_rpd = (df_wide[('value', 'reimprove-fronts')] / df_wide[('value', 'fronts')] - 1).reset_index(name='value') df_rpd # + fig = sns.catplot( data=df_rpd, x="nobj", y="value", # hue='nvar', col="problem", col_wrap=3, # row="problem", sharey=False, aspect=1, kind="bar", order=[2,3,5,10] ) from matplotlib.ticker import PercentFormatter for sub in fig.axes.flat: sub.yaxis.set_major_formatter(PercentFormatter(1, decimals=2)) # -
_util/fronts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: ipykernel_py2 # --- # ## Calculating the Return of Indices # *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).* # Consider three famous American market indices – Dow Jones, S&P 500, and the Nasdaq for the period of 1st of January 2000 until today. import numpy as np import pandas as pd from pandas_datareader import data as wb import matplotlib.pyplot as plt ind_data = pd.read_csv('D:\Python\Indices_Exercise_Data.csv', index_col='Date') ind_data.head() ind_data.tail() # Normalize the data to 100 and plot the results on a graph. (ind_data / ind_data.iloc[0] * 100).plot(figsize=(15, 6)); plt.show() # How would you explain the common and the different parts of the behavior of the three indices? # ***** # Obtain the simple returns of the indices. # + ind_returns = (ind_data / ind_data.shift(1)) - 1 ind_returns.tail() # - # Estimate the average annual return of each index. annual_ind_returns = ind_returns.mean() * 250 annual_ind_returns
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/11_Calculating the Rate of Return of Indices (5:03)/Calculating the Return of Indices - Solution_CSV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import progressbar # + # honest network delay over next n blocks. def vectorDelayHonest(ps, es, init_endorsers = 24, delay_priority = 40, delay_endorse = 8): return (60 * len(ps) + delay_priority * sum(ps) + sum([delay_endorse * max(init_endorsers - e, 0) for e in es])) # attacking network delay over next n blocks. def vectorDelayAttacker(ps, es, init_endorsers = 24, delay_priority = 40, delay_endorse = 8): return (60 * len(ps) + delay_priority * sum(ps) + sum([delay_endorse * max(init_endorsers - e, 0) for e in es[1:]])) # efficient sample generation def getAH(alpha): x = np.random.geometric(1-alpha) if x == 1: h = 0 a = np.random.geometric(alpha) else: a = 0 h = x - 1 return [a, h] def rewardBlock(p, e): if p == 0: return e * 1.25 return e * 0.1875 def rewardEndorsement(p): if p == 0: return 1.25 return 0.8333333 def calcHonestSingle(p, e): if p == 0: return rewardBlock(0, 32) + e * rewardEndorsement(0) return e * rewardEndorsement(0) def calcAttackSingle(p, e): return rewardBlock(p, e) + e * rewardEndorsement(p) def vectorRewardHonest(ps, es): totalReward = 0 for i in range(len(ps)): totalReward += calcHonestSingle(ps[i], es[i]) return totalReward def vectorRewardAttack(ps, es): totalReward = calcAttackSingle(ps[0], 32) for i in range(1,len(ps)): totalReward += calcAttackSingle(ps[i], es[i]) return totalReward def calcCosts(ps, es): return vectorRewardHonest(ps, es) - vectorRewardAttack(ps, es) # - def getProbSelfish(alpha, length, sample_size = int(1e5), init_endorsers = 24, delay_priority = 40, delay_endorse = 8): bar = progressbar.ProgressBar() feasible_count = 0 for _ in bar(range(sample_size)): aVals = [] hVals = [] for i in range(length): a, h = getAH(alpha) aVals.append(a) hVals.append(h) eVals = np.random.binomial(32, 
alpha, size = length) honest_delay = vectorDelayHonest(hVals, 32 - eVals) selfish_delay = vectorDelayAttacker(aVals, eVals) if (selfish_delay <= honest_delay) and (calcCosts(aVals, eVals) < 0): feasible_count += 1 return feasible_count / sample_size
monte_carlo_cost.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lessons (Not) Learned: Chicago Health Inequities during the 1918 Flu and COVID-19. # # ## Historic 1918 Influenza and 2020 COVID-19 Pandemic in Chicago: Health Diaparity during pandemic for slaves and other black living in Chicago # ### Objectives # The purpose of this project is to illustrate the historical racial trauma in African Americans via tracing the linkage between 1918 flu pandemic and COVID-19 in Illinois. The project consists of three part of works: 1) digitizing and geoprocessing for historical analog data into digital resources, 2) identifying the neighborhood for each race, and 3) comparison between neighborhood and its mortality rate. As a part of result, several maps will be produced to achieve. This notebook would ptovide the intuition which racial group and factors were associated with the excess death rate in Chicago during pandemic. # ## 1. 1918 Influenza and pneumonia and COVID-19 data # All historic maps that given geospatial/geolocation information were digitized and analyzed using ArcGIS software. To assess whether descendants of slaves and other Blacks living in Chicago were vulnerable to excess death during the 1918 Influenza Pandemic, data from the book, A Report of an Epidemic of Influenza in Chicago Occurring during the Fall of 1918, was digitized and analyzed. In the book are seven consecutive weekly maps of Chicago from the week ending on October 5th to the week ending on November 16th that illustrate the locations of influenza deaths and pneumonia deaths. Each point refers only to the location of death and not any description of the person who died. # ### Methods # # #### 1. 
Preparation: Geo-referencing and Digitizing # # ##### (1) 1918 Historic Influenza map # # |DATE|<center>10/5/1918</center>|<center>10/12/1918</center>|<center>10/19/1918</center>|<center>10/26/1918</center>|<center>11/2/1918</center>|<center>11/9/1918</center>|<center>11/16/1918</center>| # |---|---|---|---|---|---|---|---| # |ORIGINAL MAP|<img src= "./Digitizing Map/Map_Page_1.jpg">|<img src= "./Digitizing Map/Map_Page_2.jpg">|<img src= "./Digitizing Map/Map_Page_3.jpg">|<img src= "./Digitizing Map/Map_Page_4.jpg">|<img src= "./Digitizing Map/Map_Page_5.jpg">|<img src= "./Digitizing Map/Map_Page_6.jpg">|<img src= "./Digitizing Map/Map_Page_7.jpg">| # # This original historic maps were used to perform the geo-referencing [Link](https://github.com/beckvalle/georef_demo/blob/main/Georeferencing_Instructions.ipynb) and digitizing process. # # #### Digitizing # |<img src= "./Digitizing Map/Digitizing.png" width = 70% height = 70%>|<img src= "./Digitizing Map/Digitizing_1.png" width = 70% height = 70%>| # |:---:|:---:| # |<center>Digitizing protocol from Original Map</center>|<center>Connecting folder</center>| # # |<img src= "./Digitizing Map/Digitizing_2.png" width = 70% height = 70%>|<img src= "./Digitizing Map/Digitizing_3.png" width = 70% height = 70%>| # |:---:|:---:| # |<center>Loading files (1) - All image files + street map</center>|<center>Loading files (2) + Empty point shapefile</center>| # # |<img src= "./Digitizing Map/Digitizing_4.png" width = 70% height = 70%>|<img src= "./Digitizing Map/Digitizing_5.png" width = 70% height = 70%>| # |:---:|:---:| # |<center>Editing (1)</center>|<center>Editing (2)</center>| # These degitized point map for each individual death were aggregated into the census tract map for better visualization # # |DATE|<center>10/5/1918</center>|<center>10/12/1918</center>|<center>10/19/1918</center>|<center>10/26/1918</center>|<center>11/2/1918</center>|<center>11/9/1918</center>|<center>11/16/1918</center>| # 
|---|---|---|---|---|---|---|---| # |INFLUENZA DEATH MAP|<img src= "./Digitizing Map/181005_Flu.jpg">|<img src= "./Digitizing Map/181012_Flu.jpg">|<img src= "./Digitizing Map/181019_Flu.jpg">|<img src= "./Digitizing Map/181026_Flu.jpg">|<img src= "./Digitizing Map/181102_Flu.jpg">|<img src= "./Digitizing Map/181109_Flu.jpg">|<img src= "./Digitizing Map/181116_Flu.jpg">| # |PNUEMONIA DEATH MAP|<img src= "./Digitizing Map/181005_Pna.jpg">|<img src= "./Digitizing Map/181012_Pna.jpg">|<img src= "./Digitizing Map/181019_Pna.jpg">|<img src= "./Digitizing Map/181026_Pna.jpg">|<img src= "./Digitizing Map/181102_Pna.jpg">|<img src= "./Digitizing Map/181109_Pna.jpg">|<img src= "./Digitizing Map/181116_Pna.jpg">| # For the analysis, all 7 influenza and pnuemonia death maps were merged into the one map to examine the total death during 7 weeks periods. # # # ##### 2. Identifying neighborhood # Identifying spatial boundaries is important to assess race and neighborhood. # # ###### 1) Community settlement map # Firstly, we used a community settlement map that is from the supplement of the book, Historic City – The settlement of Chicago, which is published by the department of development and planning the city of Chicago. This map not only shows the pattern of settlement of diverse racial groups in Chicago but also represents the cultural and cognitive boundary for each race. Moreover, the identified industrial area can provide environmental vulnerability based on its proximity. # # |<img src= "./Digitizing Map/1920.jpg">|<img src= "./Digitizing Map/CmmStl_Flu.jpg">|<img src= "./Digitizing Map/CmmStl_Pnu.jpg">| # |---|---|---| # |<center>Original scanned map</center>|<center>Degitized map with influenza death</center>|<center>Degitized map with pnuemonia death</center>| # # ###### 2) Census tract # To be released after publishing. # # # ###### 3) Census ward # To be released after publishing. # # # # #### Interactive map for 1918 Influenza pandemic (Incomplete. 
Functioning version to be released upon publishing) """# import libraries import numpy as np from datetime import date, datetime import pandas as pd import geopandas as gpd from bokeh.plotting import save, figure from bokeh.io import show, output_notebook, push_notebook from bokeh.models import GeoJSONDataSource, ColumnDataSource, CustomJS from bokeh.models import LinearColorMapper, ColorBar from bokeh.models import DataTable, DateFormatter, TableColumn from bokeh.models import HoverTool, TapTool, Div from bokeh.models import DateRangeSlider, Dropdown from bokeh.palettes import brewer from bokeh.events import Tap from bokeh.tile_providers import Vendors, get_provider from bokeh.layouts import gridplot, layout, column, row import networkx as nx import osmnx as ox import matplotlib.pyplot as plt import multiprocessing as mp import folium, itertools, os, time, warnings from shapely.geometry import Point, LineString, Polygon from tqdm import tqdm from IPython.display import display, clear_output warnings.filterwarnings("ignore")""" """FLU_1005 = gpd.read_file("./Influenza_CHI/P_181005_B.shp") FLU_1012 = gpd.read_file("./Influenza_CHI/P_181012_B.shp") FLU_1019 = gpd.read_file("./Influenza_CHI/P_181019_B.shp") FLU_1026 = gpd.read_file("./Influenza_CHI/P_181026_B.shp") FLU_1102 = gpd.read_file("./Influenza_CHI/P_181102_B.shp") FLU_1109 = gpd.read_file("./Influenza_CHI/P_181109_B.shp") FLU_1116 = gpd.read_file("./Influenza_CHI/P_181116_B.shp") PNA_1005 = gpd.read_file("./Influenza_CHI/P_181005_W.shp") PNA_1012 = gpd.read_file("./Influenza_CHI/P_181012_W.shp") PNA_1019 = gpd.read_file("./Influenza_CHI/P_181019_W.shp") PNA_1026 = gpd.read_file("./Influenza_CHI/P_181026_W.shp") PNA_1102 = gpd.read_file("./Influenza_CHI/P_181102_W.shp") PNA_1109 = gpd.read_file("./Influenza_CHI/P_181109_W.shp") PNA_1116 = gpd.read_file("./Influenza_CHI/P_181116_W.shp") FLU_1005_C=FLU_1005.to_crs(epsg=4326) FLU_1012_C=FLU_1012.to_crs(epsg=4326) FLU_1019_C=FLU_1019.to_crs(epsg=4326) 
FLU_1026_C=FLU_1026.to_crs(epsg=4326) FLU_1102_C=FLU_1102.to_crs(epsg=4326) FLU_1109_C=FLU_1109.to_crs(epsg=4326) FLU_1116_C=FLU_1116.to_crs(epsg=4326) PNA_1005_C=PNA_1005.to_crs(epsg=4326) PNA_1012_C=PNA_1012.to_crs(epsg=4326) PNA_1019_C=PNA_1019.to_crs(epsg=4326) PNA_1026_C=PNA_1026.to_crs(epsg=4326) PNA_1102_C=PNA_1102.to_crs(epsg=4326) PNA_1109_C=PNA_1109.to_crs(epsg=4326) PNA_1116_C=PNA_1116.to_crs(epsg=4326) FLU_1005_C['date']='1918/10/05' FLU_1012_C['date']='1918/10/12' FLU_1019_C['date']='1918/10/19' FLU_1026_C['date']='1918/10/26' FLU_1102_C['date']='1918/11/02' FLU_1109_C['date']='1918/11/09' FLU_1116_C['date']='1918/11/16' PNA_1005_C['date']='1918/10/05' PNA_1012_C['date']='1918/10/12' PNA_1019_C['date']='1918/10/19' PNA_1026_C['date']='1918/10/26' PNA_1102_C['date']='1918/11/02' PNA_1109_C['date']='1918/11/09' PNA_1116_C['date']='1918/11/16' FLU_1005_C['type']='Flu' FLU_1012_C['type']='Flu' FLU_1019_C['type']='Flu' FLU_1026_C['type']='Flu' FLU_1102_C['type']='Flu' FLU_1109_C['type']='Flu' FLU_1116_C['type']='Flu' PNA_1005_C['type']='Pna' PNA_1012_C['type']='Pna' PNA_1019_C['type']='Pna' PNA_1026_C['type']='Pna' PNA_1102_C['type']='Pna' PNA_1109_C['type']='Pna' PNA_1116_C['type']='Pna' FLU_1005_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in FLU_1005_C['date']] FLU_1012_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in FLU_1012_C['date']] FLU_1019_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in FLU_1019_C['date']] FLU_1026_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in FLU_1026_C['date']] FLU_1102_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in FLU_1102_C['date']] FLU_1109_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in FLU_1109_C['date']] FLU_1116_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date 
in FLU_1116_C['date']] PNA_1005_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1005_C['date']] PNA_1012_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1012_C['date']] PNA_1019_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1019_C['date']] PNA_1026_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1026_C['date']] PNA_1102_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1102_C['date']] PNA_1109_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1109_C['date']] PNA_1116_C['timestamp'] = [datetime.timestamp(datetime.strptime(date, '%Y/%m/%d')) for date in PNA_1116_C['date']] FLU_1005_C ['rat_color'] = 'red' FLU_1012_C ['rat_color'] = 'red' FLU_1019_C ['rat_color'] = 'red' FLU_1026_C ['rat_color'] = 'red' FLU_1102_C ['rat_color'] = 'red' FLU_1109_C ['rat_color'] = 'red' FLU_1116_C ['rat_color'] = 'red' PNA_1005_C ['rat_color'] = 'blue' PNA_1012_C ['rat_color'] = 'blue' PNA_1019_C ['rat_color'] = 'blue' PNA_1026_C ['rat_color'] = 'blue' PNA_1102_C ['rat_color'] = 'blue' PNA_1109_C ['rat_color'] = 'blue' PNA_1116_C ['rat_color'] = 'blue' FLU_Point = FLU_1005_C FLU_Point = FLU_Point.append(FLU_1012_C) FLU_Point = FLU_Point.append(FLU_1019_C) FLU_Point = FLU_Point.append(FLU_1026_C) FLU_Point = FLU_Point.append(FLU_1102_C) FLU_Point = FLU_Point.append(FLU_1109_C) FLU_Point = FLU_Point.append(FLU_1116_C) PNA_Point = PNA_1005_C PNA_Point = PNA_Point.append(PNA_1012_C) PNA_Point = PNA_Point.append(PNA_1019_C) PNA_Point = PNA_Point.append(PNA_1026_C) PNA_Point = PNA_Point.append(PNA_1102_C) PNA_Point = PNA_Point.append(PNA_1109_C) PNA_Point = PNA_Point.append(PNA_1116_C) Final_Point = FLU_Point.append(PNA_Point) # record data sources for bokeh tl_data= [['10/05/1918',"Week 1"], ['10/26/1918',"Week 4"], ['11/16/1918',"Week 7"]] 
tl_df= pd.DataFrame(tl_data, columns = ['tl_date', 'tl_note']) tl_df['tl_ts'] = [datetime.timestamp(datetime.strptime(date, '%m/%d/%Y'))*1000 for date in tl_df['tl_date']]""" # + """Final_source = GeoJSONDataSource(geojson=Final_Point.to_json()) Final_disp_source = GeoJSONDataSource(geojson=Final_Point.to_json()) timeline_source = ColumnDataSource(tl_df) tile_provider2 = get_provider(Vendors.CARTODBPOSITRON) s4 = figure(title="Influenza Map", plot_width=500, x_range=(-9800000, -9740000), y_range=(5100000, 5170000), tooltips=[("", "@Title")], tools="tap") s4.toolbar.logo = None s4.toolbar_location = None s4.add_tile(tile_provider2) # plot points for movie locations Final_pts = s4.circle(x='x', y='y', source=Final_disp_source, fill_color='rat_color', legend_label='Flu', size=5) print(Final_pts) # plot points for movie locations Final_pts = s4.circle(x='x', y='y', source=Final_disp_source, fill_color='rat_color', legend_label='Flu', size=5) # add a dropdown menu to change the point color menu = [("Default", "default"), ("Type", "type")] callback_p = CustomJS(args = dict(source = Final_source, fill_source = Final_disp_source, s4 = s4, Final_pts = Final_pts), """ code = """ var data = source.data; var fill_data = fill_source.data; var choice = this.item; if (choice == "Flu") { fill_data['rat_color'] = []; for (var i = 0; i < data.x.length; i++) { if (fill_data['type'][i] == "Flu") { fill_data['rat_color'].push('red'); } } Final_pts.glyph.fill_color.field = 'rat_color'; } else { Final_pts.glyph.fill_color = 'blue'; } fill_source.change.emit(); """) """dropdown = Dropdown(label="Change Point Color", button_type="warning", menu=menu) dropdown.js_on_event("menu_item_click", callback_p) # add a data range slider callback_t = CustomJS(args = dict(source = Final_source, fill_source = Final_disp_source), """ code = """ var data = source.data; var fill_data = fill_source.data; var s_val = cb_obj.value; fill_data['x']=[]; fill_data['y']=[]; for (var i = 0; i < data.x.length; i++) 
{ if ((data['timestamp'][i] >= (s_val[0]/1000)) && (data['timestamp'][i] <= (s_val[1]/1000))) { fill_data['y'].push(source.data['y'][i]); fill_data['x'].push(source.data['x'][i]); } else { fill_data['y'].push(NaN); fill_data['x'].push(NaN); } } fill_source.change.emit(); """) """date_range_slider = DateRangeSlider(value=(date(1918, 10, 1), date(1918, 11, 15)), start=date(1918, 6, 1), end=date(1918, 12, 31)) date_range_slider.js_on_change("value", callback_t) div = Div(text="""""", width=200, height=100) # add tap tool to display text when point is selected taptool = s4.select(type=TapTool) taptool.callback = CustomJS(args = dict(source = Final_disp_source, div = div), """ code = """ var data = source.data; const inds = source.selected.indices; var base_str = ""; for (const indx in inds) { base_str = base_str.concat('id: ', inds[indx].toString(), '<br />'); base_str = base_str.concat('date: ', data['Date'][inds[indx]], '<br />'); base_str = base_str.concat('title: ', data['type'][inds[indx]], '<br />'); } div.text = base_str; """) """tl_hover = HoverTool( tooltips=n <div> <div> <img src="@tl_image" height="42" width="42" style="float: left; margin: 0px 15px 15px 0px;" onerror="this.style.display='none'" ></img> </div> <div> <span style="font-size: 17px; font-weight: bold;">@tl_date</span> <br> <span style="font-size: 15px; color: #966;">@tl_note</span> </div> </div> """) """s5 = figure(title="Timeline", x_axis_type='datetime', tools=[tl_hover], plot_height=100, # multiply x range by 1000 to convert between microseconds x_range=(datetime.timestamp(datetime.strptime('09/01/1918', '%m/%d/%Y'))*1000, datetime.timestamp(datetime.strptime('12/31/1918', '%m/%d/%Y'))*1000), y_range=(0.5, 1.5)) s5.yaxis.major_label_text_font_size = '0pt' s5.toolbar.logo = None s5.toolbar_location = None tl_pts = s5.triangle(x='tl_ts', y=1, source=timeline_source, fill_color='red', size=20) date_range_slider.js_link('value', s5.x_range, 'start') date_range_slider.js_link('value', s5.x_range, 
'end') t = show((column(row(s4, div), dropdown, date_range_slider, s5)), notebook_handle=True)""" # - # ###### 4) Race and neighborhood # To be released after publishing. # # # ##### Ffinal maps for influezan and pneumonia death # To be released after publishing. # # # ## 2. COVID-19 # To be released after publishing. # # # #
Mendenhall_Geospatial_Fellowship.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 模型正则化 # [![PR8.png](https://i.postimg.cc/VNX7bC17/PR8.png)](https://postimg.cc/zLXws3yW) # # - 这种正则化方式称为**岭回归(Ridge Regression)**方式 # ### 1. 准备 import numpy as np import matplotlib.pyplot as plt np.random.seed(333) x = np.random.uniform(-3.0, 3.0, size=100) X = x.reshape(-1, 1) y = 0.5 * x + 3 + np.random.normal(0, 1, size=100) plt.scatter(x, y) # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression def PolynomialRegression(degree): return Pipeline([ ("poly", PolynomialFeatures(degree=degree)), ("std_scaler", StandardScaler()), ("lin_reg", LinearRegression()) ]) # + from sklearn.model_selection import train_test_split np.random.seed(333) X_train, X_test, y_train, y_test = train_test_split(X, y) # + from sklearn.metrics import mean_squared_error poly_reg = PolynomialRegression(degree=20) poly_reg.fit(X_train, y_train) y_poly_pridect = poly_reg.predict(X_test) mean_squared_error(y_test, y_poly_pridect) # + X_plot = np.linspace(-3, 3, 100).reshape(100, 1) y_plot = poly_reg.predict(X_plot) plt.scatter(x, y) plt.plot(X_plot[:, 0], y_plot, color="r") plt.axis([-3, 3, 0, 6]) # - # - 可以看出,方差太大,发生过拟合 # + def plot_model(model): X_plot = np.linspace(-3, 3, 100).reshape(100, 1) y_plot = model.predict(X_plot) plt.scatter(x, y) plt.plot(X_plot[:, 0], y_plot, color="r") plt.axis([-3, 3, 0, 6]) plot_model(poly_reg) # - # ### 1. 
岭回归 # + from sklearn.linear_model import Ridge def RidgeRegression(degree, alpha): return Pipeline([ ("poly", PolynomialFeatures(degree=degree)), ("std_scaler", StandardScaler()), ("ridge_reg", Ridge(alpha=alpha)) ]) # 对于岭回归,最后添加的一项是 theta 的平方,所以需要一个很小的 alpha 来调节 ridge1_reg = RidgeRegression(20, 0.0001) ridge1_reg.fit(X_train, y_train) y1_pridect = ridge1_reg.predict(X_test) mean_squared_error(y_test, y1_pridect) # - # - 可以看出,经过岭回归正则化,均方误差变小 plot_model(ridge1_reg) # + ridge2_reg = RidgeRegression(20, 1) ridge2_reg.fit(X_train, y_train) y2_pridect = ridge2_reg.predict(X_test) mean_squared_error(y_test, y2_pridect) # - plot_model(ridge2_reg) # ### 2. LASSO 回归 # [![PR10.png](https://i.postimg.cc/bJnrHqdN/PR10.png)](https://postimg.cc/xJ099DKW) # + from sklearn.linear_model import Lasso def LassoRegression(degree, alpha): return Pipeline([ ("poly", PolynomialFeatures(degree=degree)), ("std_scaler", StandardScaler()), ("ridge_reg", Lasso(alpha=alpha)) ]) # 对于Lasso,最后添加的一项是 theta 的绝对值,所以不需要一个很小的 alpha 来调节 lasso1_reg = LassoRegression(20, 0.01) lasso1_reg.fit(X_train, y_train) y1_pridect_lasso = lasso1_reg.predict(X_test) mean_squared_error(y_test, y1_pridect_lasso) # - plot_model(lasso1_reg) # + lasso2_reg = LassoRegression(20, 0.1) lasso2_reg.fit(X_train, y_train) y2_pridect_lasso = lasso2_reg.predict(X_test) mean_squared_error(y_test, y2_pridect_lasso) # - plot_model(lasso2_reg) # + lasso3_reg = LassoRegression(20, 1) lasso3_reg.fit(X_train, y_train) y3_pridect_lasso = lasso3_reg.predict(X_test) mean_squared_error(y_test, y3_pridect_lasso) # - plot_model(lasso3_reg) # ### 3. 
比较 # # [![PR11.png](https://i.postimg.cc/sf6fQBT9/PR11.png)](https://postimg.cc/23WNg5RV) # # [![PR12.png](https://i.postimg.cc/yNH740NB/PR12.png)](https://postimg.cc/WdXB0Frf) # # [![PR13.png](https://i.postimg.cc/xj4Yjwd6/PR13.png)](https://postimg.cc/s17bw07W) # # [![PR14.png](https://i.postimg.cc/W3fR3Zsd/PR14.png)](https://postimg.cc/vg922cgy) # # - Ridge 和 LASSO 衡量正则化 # - MSE 和 MAE 衡量模型的好坏 # - 欧拉距离 和 曼哈顿距离衡量距离大小
ML-Base-MOOC/chapt-6 Polynomial-Regression/06-Regularization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Packages # + import numpy as np from scipy.ndimage.filters import uniform_filter from scipy.optimize import root from functools import reduce from scipy.signal import correlate from scipy.interpolate import interp1d from shapely.geometry import LineString, Polygon, Point from zipfile import ZipFile import xml.sax, xml.sax.handler from pyproj import Proj, transform from osgeo import gdal import pandas as pd import time import datetime import pickle from pathlib import Path from descartes import PolygonPatch from matplotlib import pyplot as plt import matplotlib.cm as cm from matplotlib.collections import PatchCollection, LineCollection from matplotlib.patches import Rectangle from matplotlib.ticker import NullFormatter # %matplotlib inline plt.rcParams['text.usetex'] = True #Let TeX do the typsetting plt.rcParams['text.latex.preamble'] = [r'\usepackage{sansmath}', r'\sansmath'] #Force sans-serif math mode (for axes labels) plt.rcParams['font.family'] = 'sans-serif' # ... 
for regular text plt.rcParams['font.sans-serif'] = 'Helvetica' # Choose a nice font here fs = 8 from IPython.core.display import display, HTML display(HTML("<style>.container { width:80% !important; }</style>")) # - # # Constants # + s2y = 1/(365*24*60*60) m2km = 1e-3 m2mm = 1e+3 af0 = 25 # direction clockwise from east in degrees that the dunes move af0_r = af0*np.pi/180 adelta = 15 # permissible variation around af0 in some calculations # representative constants rhos = 2380 # gypsum rhof = 1.225 # air grav = 9.81 # gravity gdia = 300e-6 # aeolian grain diameter kap = 0.41 # von karman z00 = 1e-4 # roughness length of upwind z01 = 1e-1 # roughness length of downwind zU = 10 # height of met tower wind f = 7.2921*10**-5*2*np.sin(32.87*np.pi/180) # coriolis frequency at the location uscr = 0.3 # approximate value (jerolmack et al 2011 use 0.39 and iverson 1987 would be 0.24) # uscr = (rhos*grav*gdia/rhof)**0.5/10 # iverson et al 1987 # + dtdz_Uthr = 2.5e-7 # threshold maximum change in dzdt we expect - anything larger would be ~7.9 m/yr slopethr = 1e-2 # minimum slope of surfaces included in the flux calculations repose = np.tan(30*np.pi/180) # angle of repose is like 34 degrees dzdtthr = 1e-8 # threshold minimum change used in the flux calculations distthr = 10 # minimum distance that a flux calculation will be integrated over DS = 500 # meters over which the dune orientation distribution is segmented stdwidth = 100 # moving box window width in meters over which the elevation standard deviation is calculated uwimax = 5547 # index where the mutual dem upwind edge first intersects the dunefield upwind boundary dt0 = 21168000 #seconds between jan09 and sep09 dt1 = 21859200 #seconds between sep09 and jun10 dt2 = dt0+dt1 #seconds between jan09 and jun10 dempairs = [[0,1],[1,2],[0,2]] demdts = [dt0,dt1,dt2] # + eps = 1e-2 # 1+-eps for speed saturation envelope Nt = 1000 # number of timesteps Ng = 10 # number of forcing steps Nz = 100 # number of vertical grid cells phi = 
0.74 # packing fraction of sand for landscape evolution Z = 1000 # Z is the ABL flow height from momen & bou-zeid 2016 L0 = 50 # L0 is the eddy mixing length scale from momen & bou-zeid 2016 z_t = np.logspace(np.log10(1),np.log10(100),Nz) # vertical grid, log-spaced between the 1m and ABL height T = 60*60*30 # total model time t_t = np.logspace(0,np.log10(T),Nt) # the time step grid # - # # Functions # ## DEM # + ############### ## FUNCTION DEFS TO PARSE KMZ ## ############### class PlacemarkHandler(xml.sax.handler.ContentHandler): def __init__(self): self.inName = False # handle XML parser events self.inPlacemark = False self.mapping = {} self.buffer = "" self.name_tag = "" def startElement(self, name, attributes): if name == "Placemark": # on start Placemark tag self.inPlacemark = True self.buffer = "" if self.inPlacemark: if name == "name": # on start title tag self.inName = True # save name text to follow def characters(self, data): if self.inPlacemark: # on text within tag self.buffer += data # save text if in title def endElement(self, name): self.buffer = self.buffer.strip('\n\t') if name == "Placemark": self.inPlacemark = False self.name_tag = "" #clear current name elif name == "name" and self.inPlacemark: self.inName = False # on end title tag self.name_tag = self.buffer.strip() self.mapping[self.name_tag] = {} elif self.inPlacemark: if name in self.mapping[self.name_tag]: self.mapping[self.name_tag][name] += self.buffer else: self.mapping[self.name_tag][name] = self.buffer self.buffer = "" def build_table(mapping): sep = ',' output = 'Name' + sep + 'Coordinates\n' points = '' lines = '' shapes = '' for key in mapping: coord_str = mapping[key]['coordinates'] + sep if 'LookAt' in mapping[key]: #points points += key + sep + coord_str + "\n" elif 'LineString' in mapping[key]: #lines lines += key + sep + coord_str + "\n" else: #shapes shapes += key + sep + coord_str + "\n" output += points + lines + shapes return output # + # fast function to make a moving 
window that finds the standard deviation of dem within it def window_std(X, window_size): r,c = X.shape X+=np.random.rand(r,c)*1e-6 # add some tiny tiny noise because the values are discrete c1 = uniform_filter(X, window_size, mode='reflect') c2 = uniform_filter(X*X, window_size, mode='reflect') return np.sqrt(c2 - c1*c1) # + # function to calibrate the dem difference maps because each flight is slightly bias in absolute elevation # done by linear shift in the dzdt maps such that they all have a net value of zero # this method is used because the majority of each map is interdune or non-erodible material def demcorrect(dems,dempairs,demdts): binno = 200 dzdtcs = [] for i in range(len(dempairs)): dzdt = (dems[dempairs[i][1]]-dems[dempairs[i][0]])/demdts[i] dzdt[np.abs(dzdt)>dtdz_Uthr] = np.nan temp = dzdt[~np.isnan(dzdt)] dzdtcnt,dzdtbins = np.histogram(temp,bins=np.linspace(np.percentile(temp,2),np.percentile(temp,98),binno)) dzdtbinmids = dzdtbins[1:] + np.diff(dzdtbins)/2 dzdtcs.append(dzdt - dzdtbinmids[np.argmax(dzdtcnt)]) return dzdtcs # + # function to find the sediment flux from a pair of dems # done by integrating along segments of transects in the along-flux direction (x) # where each segment is assumed to start with a zero flux boundary def demflux(dem_i,dem_f,dzdt,x): qs = np.empty([np.shape(dem_i)[0],np.shape(dem_i)[1]-1]) qs[:] = np.nan dx = np.diff(x) for i in np.arange(np.shape(dem_i)[0]): tempy = np.copy(dzdt[i,:]) tempd0 = np.diff(dem_i[i,:])/dx tempd1 = np.diff(dem_f[i,:])/dx tempy = tempy[1:] # get rid of pixels where land in both dems is flat, and the change is not spuriously large tempy[(np.abs(tempd0)<slopethr)&(np.abs(tempd1)<slopethr)&(np.abs(tempy)<dzdtthr)] = np.nan # only include segments that are at least 10m long indso = np.squeeze(np.argwhere(np.isnan(tempy))) indsn = [] for j in np.arange(np.shape(indso)[0]-1): if np.diff(indso)[j]<=distthr: indsn.append(np.arange(indso[j],indso[j+1])) tempy[np.concatenate(indsn).ravel()] = np.nan 
temp1 = np.squeeze(np.argwhere(~np.isnan(tempy))) temp1i = temp1[np.squeeze(np.argwhere(np.diff(temp1)!=1))] temp2 = np.squeeze(np.argwhere(np.isnan(tempy))) temp2i = temp2[np.squeeze(np.argwhere(np.diff(temp2)!=1))]+1 # do exner on each segment if np.shape(temp1i): for j in np.arange(np.shape(temp1i)[0]): qs[i,temp2i[j]:temp1i[j]+1] = -phi*np.cumsum(tempy[temp2i[j]:temp1i[j]+1])*dx[0] else: qs[i,temp2i[0]:temp1i+1] = -phi*np.cumsum(tempy[temp2i[0]:temp1i+1])*dx[0] return qs # - # function to grab the dip angles on any slipfaces def ae(z): dzdx = np.gradient(z,axis=1) dzdy = np.gradient(z,axis=0) dzdx[(dzdx**2+dzdy**2)**0.5<repose] = np.nan dzdy[(dzdx**2+dzdy**2)**0.5<repose] = np.nan return np.arctan2(-dzdy,-dzdx)*180/np.pi # unfortunately distributions of slipface angles are heavily biased by DEM gridding # have to interpolate across neighbours for probabilties in a distrubtion for angles between corners of a grid def ridspuriousangles(acnt,abinmids): ta = np.copy(acnt) for i in np.arange(-8,8)*45: ti = np.argwhere(abinmids==i) ta[ti] = (ta[ti-1]+ta[ti+1])/2 ti = np.argwhere(abinmids==8*45) ta[-1] = (ta[-2]+ta[0])/2 return ta # + # function to grab the probability distribution of characteristic dune direction on a dem # has to be done on a dem with NSEW gridding bc of the issue addressed by the function above^ # the characteristic dune direction for a sub-tile of dem is found by cross-correlating its # distribution of slip face angles with a flipped version of itself then using the angle where # the correlation is maximized. 
it's done this way since most parabolic or barchan dunes slip # face dips are actually not in the dune direction, and sometimes their arms are different lengths def angledist(Z,x,y,da,domain): Yis = np.arange(0,int(np.shape(Z)[0]),DS,dtype=int) Xis = np.arange(0,int(np.shape(Z)[1]),DS,dtype=int) dd = [] da1 = 1/2 abins = np.arange(-360-da1,360+2*da1,2*da1) abinmids = abins[1:] - da1 ae_z = ae(Z) for i in range(len(Yis)-1): for j in range(len(Xis)-1): tempz = Z[Yis[i]:Yis[i+1],Xis[j]:Xis[j+1]] x0y0 = Point(x[Xis[j]],y[Yis[i]]) x0y1 = Point(x[Xis[j]],y[Yis[i+1]]) x1y0 = Point(x[Xis[j+1]],y[Yis[i]]) x1y1 = Point(x[Xis[j+1]],y[Yis[i+1]]) corners = [x0y0,x0y1,x1y0,x1y1] if np.any(np.isnan(tempz)): continue elif np.any([not domain.contains(point) for point in corners]): continue else: temp = ae_z[Yis[i]:Yis[i+1],Xis[j]:Xis[j+1]] tempa = temp[~np.isnan(temp)] acnt,_ = np.histogram(tempa,bins=abins) tempc = ridspuriousangles(acnt,abinmids) p = correlate(tempc,np.flip(tempc),mode='same') dd.append(abinmids[np.argmax(p)]/2) mAf = np.arange(-180,180+da,da) mAmidf = mAf[1:]-da/2 pdd,_ = np.histogram(dd,bins=mAf) pdd = pdd/np.sum(pdd) return pdd,mAmidf # - # ## In-situ # + # this function returns directions of wind vectors af, as +-180 from af0 def bearing_2_pm180_af0(ai,af0): # ai is the bearing of the wind # af0 is the direction of the wind vector you wish to set as the origin af = np.copy(ai) af = -af - af0 - 90 af[af<-180] = af[af<-180] + 360 return af t0 = 20 t1 = range(360) t2 = bearing_2_pm180_af0(t1,t0) fig = plt.gcf() plt.plot(t1,t2) plt.xlim(0,360) plt.ylim(-180,180) plt.xlabel('bearing',fontsize=fs*2) plt.ylabel('+/- from origin at %d clockwise from east'%t0,fontsize=fs*2) plt.xticks(fontsize=fs*2) plt.yticks(fontsize=fs*2) fig.set_size_inches(6, 6, forward=True) # + # gives you the volume flux scalar given wind speeds and grain diameter (not summed over time) def flux(U,zU,z0): usabs = U*kap/np.log(zU/z0) # neutral law of the wall usabs[usabs<uscr] = uscr # get rid 
of negative unreal data constant = 2.8669917319 # this is the average value from Li et al (2010) and Ho et al (2011) as reported in Kok review, figure 2.15 return constant*rhof/rhos/grav*uscr*usabs**2*(1-(uscr/usabs)**2) # this is Q_DK in Table 2.1 of Kok review # does the inverse of the function above def ufromflux(q,zU,z0): constant = 2.8669917319 return (q/uscr/constant/rhof*rhos*grav+uscr**2)**(1/2)/kap*np.log(zU/z0) u1 = np.linspace(0,30,100) u2 = ufromflux(flux(u1,zU,z00),zU,z00) fig = plt.gcf() plt.plot(u1,u2) plt.plot([0,30],[0,30]) plt.xlabel('$U_{original}$ (m/s)',fontsize=fs*2) plt.ylabel('$f^{-1}(f(U_{original}))$ (m/s)',fontsize=fs*2) plt.xticks(fontsize=fs*2) plt.yticks(fontsize=fs*2) fig.set_size_inches(6, 6, forward=True) # - # ## Theory # this is the turbulence closure model # it is a damping coefficient proportional to the forcing and mixing as a function of height def a(Ag,z,z0,Z,L0): # Ag is the geostrophic flow speed # L0 is the eddy mixing length scale # z0 is the roughness length scale # Z is the ABL flow height return Ag*L0*np.log(z/z0)**-1*Z**-2 # + # functions to find a geostrophic flow for a given speed-elevation pair # for the closure model a def findg(g,u,z,z0): return np.abs(g/(1-1j*a(g,z,z0,Z,L0)/f))-u def gfromu(u,z,z0): return root(findg,1,args=(u,z,z0),method='lm').x[0] # - # # Data # ## DEM # To create the DEMs, made as .tif files, I used apdal functions via terminal. These are the steps to create them: # 1. Download the the 3 point clouds from OpenTopography as .las files # 2. These are in [x,y] convention, to create [x_r,y_r] versions run the command # `pdal translate -i andle-ws-MMMYY.las -o andle-ws-MMMYY-rotated.las -f transformation --filters.transformation.matrix="0.923879563808 0.382683426142 0 0 -0.382683426142 0.923879563808 0 0 0 0 1 0 0 0 0 1"` where MMMYY is like 'jun10' and the 4x4 matrix values are explained in 335deg_rotation_4matrix.txt # 3. 
Now to create DEMs from each pair of .las files, run the commands `pdal pipeline dtm-common-grid.json` and `pdal pipeline dtm-rotated-common-grid.json` to generate the .tif files # # The .json files have a `"bounds"` parameter which is set by the bounds of the union of the 3 .las files, meaning that all .tifs will have common grids. # + # file locations jan09 = 'DEMs/dem-zmin-andle-ws-jan09-common-grid.tif' sep09 = 'DEMs/dem-zmin-andle-ws-sep09-common-grid.tif' jun10 = 'DEMs/dem-zmin-andle-ws-jun10-common-grid.tif' jan09r= 'DEMs/dem-zmin-andle-ws-jan09-rotated335-common-grid.tif' # r stands for rotated sep09r= 'DEMs/dem-zmin-andle-ws-sep09-rotated335-common-grid.tif' jun10r= 'DEMs/dem-zmin-andle-ws-jun10-rotated335-common-grid.tif' # + #parse the kmz kmz = ZipFile('google-earth-files/jan09_lidar.kmz', 'r') kml = kmz.open('doc.kml', 'r') parser = xml.sax.make_parser() handler = PlacemarkHandler() parser.setContentHandler(handler) parser.parse(kml) kmz.close() # find coordinate list in parsed kmz coords = handler.mapping[list(handler.mapping)[0]]["coordinates"] my_list = coords.split(",") #restructure string into a useful array verts = my_list[:-1] vertsclean = np.empty([int(np.shape(verts)[0]/2),2]) polystart = [] #finds indices where new polys start and end for j in np.arange(0,np.shape(verts)[0]): if ' ' in verts[j]: polystart.append(int(j/2)) verts[j]=verts[j].replace(' ','') verts[j]=verts[j].replace('0 ','') verts[j]=verts[j].replace('0-','-') if j%2==0: vertsclean[int(j/2),0] = verts[j] else: vertsclean[int(j/2),1] = verts[j] vertups_o = np.asarray(vertsclean) jan09Vertups_o = np.empty_like(vertups_o) p_lonlat = Proj(init='epsg:4326', preserve_units=False) p_lidar = Proj(init='epsg:26913', preserve_units=False) jan09Vertups_o[:,0], jan09Vertups_o[:,1] = transform(p_lonlat, p_lidar, vertups_o[:,0], vertups_o[:,1]) jan09_domain_o = Polygon(jan09Vertups_o) jan09_domain_o # + #parse the kmz kmz = ZipFile('google-earth-files/sep09_lidar.kmz', 'r') kml = 
kmz.open('doc.kml', 'r') parser = xml.sax.make_parser() handler = PlacemarkHandler() parser.setContentHandler(handler) parser.parse(kml) kmz.close() # find coordinate list in parsed kmz coords = handler.mapping[list(handler.mapping)[0]]["coordinates"] my_list = coords.split(",") #restructure string into a useful array verts = my_list[:-1] vertsclean = np.empty([int(np.shape(verts)[0]/2),2]) polystart = [] #finds indices where new polys start and end for j in np.arange(0,np.shape(verts)[0]): if ' ' in verts[j]: polystart.append(int(j/2)) verts[j]=verts[j].replace(' ','') verts[j]=verts[j].replace('0 ','') verts[j]=verts[j].replace('0-','-') if j%2==0: vertsclean[int(j/2),0] = verts[j] else: vertsclean[int(j/2),1] = verts[j] vertups_o = np.asarray(vertsclean) sep09Vertups_o = np.empty_like(vertups_o) p_lonlat = Proj(init='epsg:4326', preserve_units=False) p_lidar = Proj(init='epsg:26913', preserve_units=False) sep09Vertups_o[:,0], sep09Vertups_o[:,1] = transform(p_lonlat, p_lidar, vertups_o[:,0], vertups_o[:,1]) sep09_domain_o = Polygon(sep09Vertups_o) sep09_domain_o # + #parse the kmz kmz = ZipFile('google-earth-files/jun10_lidar.kmz', 'r') kml = kmz.open('doc.kml', 'r') parser = xml.sax.make_parser() handler = PlacemarkHandler() parser.setContentHandler(handler) parser.parse(kml) kmz.close() # find coordinate list in parsed kmz coords = handler.mapping[list(handler.mapping)[0]]["coordinates"] my_list = coords.split(",") #restructure string into a useful array verts = my_list[:-1] vertsclean = np.empty([int(np.shape(verts)[0]/2),2]) polystart = [] #finds indices where new polys start and end for j in np.arange(0,np.shape(verts)[0]): if ' ' in verts[j]: polystart.append(int(j/2)) verts[j]=verts[j].replace(' ','') verts[j]=verts[j].replace('0 ','') verts[j]=verts[j].replace('0-','-') if j%2==0: vertsclean[int(j/2),0] = verts[j] else: vertsclean[int(j/2),1] = verts[j] vertups_o = np.asarray(vertsclean) jun10Vertups_o = np.empty_like(vertups_o) p_lonlat = 
Proj(init='epsg:4326', preserve_units=False) p_lidar = Proj(init='epsg:26913', preserve_units=False) jun10Vertups_o[:,0], jun10Vertups_o[:,1] = transform(p_lonlat, p_lidar, vertups_o[:,0], vertups_o[:,1]) jun10_domain_o = Polygon(jun10Vertups_o) jun10_domain_o # + # grab dems x y and z arrays ds = gdal.Open(sep09, gdal.GA_ReadOnly) rb = ds.GetRasterBand(1) metaxy = ds.GetGeoTransform() z_sep09 = np.flip(rb.ReadAsArray(),axis=0) x = metaxy[0]+metaxy[1]*np.arange(0,np.shape(z_sep09)[1]) y = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(z_sep09)[0])) X,Y = np.meshgrid(x,y) # - ds = gdal.Open(jun10r, gdal.GA_ReadOnly) rb = ds.GetRasterBand(1) metaxy = ds.GetGeoTransform() zr_jun10 = np.flip(rb.ReadAsArray(),axis=0) zr_jun10[zr_jun10<0] = np.nan xr = metaxy[0]+metaxy[1]*np.arange(0,np.shape(zr_jun10)[1]) yr = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(zr_jun10)[0])) # + ds = gdal.Open(jan09r, gdal.GA_ReadOnly) rb = ds.GetRasterBand(1) metaxy = ds.GetGeoTransform() zr_jan09 = np.flip(rb.ReadAsArray(),axis=0) zr_jan09[zr_jan09<0] = np.nan ds = gdal.Open(sep09r, gdal.GA_ReadOnly) rb = ds.GetRasterBand(1) zr_sep09 = np.flip(rb.ReadAsArray(),axis=0) zr_sep09[zr_sep09<0] = np.nan dems = [zr_jan09,zr_sep09,zr_jun10] del zr_jan09, zr_sep09 # - # ## In-situ # ### Spatial # + #parse the kmz kmz = ZipFile('google-earth-files/upwind_margin.kmz', 'r') kml = kmz.open('doc.kml', 'r') parser = xml.sax.make_parser() handler = PlacemarkHandler() parser.setContentHandler(handler) parser.parse(kml) kmz.close() # find coordinate list in parsed kmz coords = handler.mapping[list(handler.mapping)[0]]["coordinates"] my_list = coords.split(",") #restructure string into a useful array verts = my_list[:-1] vertsclean = np.empty([int(np.shape(verts)[0]/2),2]) polystart = [] #finds indices where new polys start and end for j in np.arange(0,np.shape(verts)[0]): if ' ' in verts[j]: polystart.append(int(j/2)) verts[j]=verts[j].replace(' ','') verts[j]=verts[j].replace('0 ','') 
verts[j]=verts[j].replace('0-','-') if j%2==0: vertsclean[int(j/2),0] = verts[j] else: vertsclean[int(j/2),1] = verts[j] vertups_o = np.asarray(vertsclean) upwindVertups_o = np.empty_like(vertups_o) p_lonlat = Proj(init='epsg:4326', preserve_units=False) p_lidar = Proj(init='epsg:26913', preserve_units=False) upwindVertups_o[:,0], upwindVertups_o[:,1] = transform(p_lonlat, p_lidar, vertups_o[:,0], vertups_o[:,1]) upwind_boundary_o = LineString(upwindVertups_o) upwind_boundary_o # - # rotate the polygon above to match the rotated dems upwindVertups_r = np.empty_like(upwindVertups_o) upwindVertups_r[:,0] = upwindVertups_o[:,0]*np.cos(af0_r)+upwindVertups_o[:,1]*np.sin(af0_r) upwindVertups_r[:,1] = -upwindVertups_o[:,0]*np.sin(af0_r)+upwindVertups_o[:,1]*np.cos(af0_r) upwind_boundary_r = LineString(upwindVertups_r) # + #met tower locations in this coordinate system x_a_o, y_a_o = transform(p_lonlat, p_lidar, -106.289875, 32.863286) tempx2 = np.max(upwindVertups_o[:,0]) tempy2 = y_a_o + (tempx2-x_a_o)*np.tan(af0*np.pi/180) tempp3 = LineString([(x_a_o,y_a_o),(tempx2,tempy2)]).intersection(upwind_boundary_o) s_a_o = -LineString([(tempp3.coords[0][0],tempp3.coords[0][1]),(x_a_o, y_a_o)]).length x_b_o, y_b_o = transform(p_lonlat, p_lidar, -106.252210, 32.875672) tempx2 = np.min(upwindVertups_o[:,0]) tempy2 = y_b_o - (x_b_o-tempx2)*np.tan(af0*np.pi/180) tempp3 = LineString([(x_b_o,y_b_o),(tempx2,tempy2)]).intersection(upwind_boundary_o) s_b_o = LineString([(tempp3.coords[0][0],tempp3.coords[0][1]),(x_b_o, y_b_o)]).length x_c_o, y_c_o = transform(p_lonlat, p_lidar, -106.230702, 32.885072) tempx2 = np.min(upwindVertups_o[:,0]) tempy2 = y_c_o - (x_c_o-tempx2)*np.tan(af0*np.pi/180) tempp3 = LineString([(x_c_o,y_c_o),(tempx2,tempy2)]).intersection(upwind_boundary_o) s_c_o = LineString([(tempp3.coords[0][0],tempp3.coords[0][1]),(x_c_o, y_c_o)]).length # + #parse the kmz kmz = ZipFile('google-earth-files/ws_duneboundary.kmz', 'r') kml = kmz.open('doc.kml', 'r') parser = 
xml.sax.make_parser() handler = PlacemarkHandler() parser.setContentHandler(handler) parser.parse(kml) kmz.close() # find coordinate list in parsed kmz coords = handler.mapping[list(handler.mapping)[0]]["coordinates"] my_list = coords.split(",") #restructure string into a useful array verts = my_list[:-1] vertsclean = np.empty([int(np.shape(verts)[0]/2),2]) polystart = [] #finds indices where new polys start and end for j in np.arange(0,np.shape(verts)[0]): if ' ' in verts[j]: polystart.append(int(j/2)) verts[j]=verts[j].replace(' ','') verts[j]=verts[j].replace('0 ','') verts[j]=verts[j].replace('0-','-') if j%2==0: vertsclean[int(j/2),0] = verts[j] else: vertsclean[int(j/2),1] = verts[j] vertups_o = np.asarray(vertsclean) dfVertups_o = np.empty_like(vertups_o) p_lonlat = Proj(init='epsg:4326', preserve_units=False) p_lidar = Proj(init='epsg:26913', preserve_units=False) dfVertups_o[:,0], dfVertups_o[:,1] = transform(p_lonlat, p_lidar, vertups_o[:,0], vertups_o[:,1]) df_domain_o = Polygon(dfVertups_o) df_domain_o # + # get the length of the dune field in the direction of the dunes on a transect that passes through met a x_a_o, y_a_o = transform(p_lonlat, p_lidar, -106.289875, 32.863286) tempx2 = np.max(dfVertups_o[:,0]) tempy2 = y_a_o + (tempx2-x_a_o)*np.tan(af0*np.pi/180) tempp3 = LineString([(x_a_o,y_a_o),(tempx2,tempy2)]).intersection(df_domain_o) s_r_m = LineString([(tempp3.coords[0][0],tempp3.coords[0][1]),(tempp3.coords[1][0],tempp3.coords[1][1])]).length # - s_r_m # ### Met towers # + # file locations of the met tower data pa0 = 'mets/Tower1/Tower1_7-17-15/Tower1_Avg10Min.dat' pa1 = 'mets/Tower1/Tower1_8-15-15/Tower1_Avg10Min_2015_08_15_08_15_22.dat' pa2 = 'mets/Tower1/Tower1_8-3-15/Tower1_Avg10Min_2015_08_03_12_20_02.dat' pb0 = 'mets/Tower2/Tower2_7-17-15/Tower2_Avg10Min.dat' pb1 = 'mets/Tower2/Tower2_8-15-15/Tower2_Avg10Min_2015_08_15_09_17_56.dat' pb2 = 'mets/Tower2/Tower2_8-3-15/Tower2_Avg10Min_2015_08_03_12_34_29.dat' pc0 = 
'mets/Tower3/Tower3_7-17-15/Tower3_Avg10Min.dat' pc1 = 'mets/Tower3/Tower3_7-6-15/Tower3_Avg10Min.dat' pc2 = 'mets/Tower3/Tower3_8-15-15/Tower3_Avg10Min_2015_08_15_09_32_09.dat' # 2nd file for tower1 is weird, omit path = [pa0,pa1,pb0,pb1,pb2,pc0,pc1,pc2] towe = ['a','a','b','b','b','c','c','c'] # towers are lettered in order of downwind ta = [] # time: tower tb = [] tc = [] aai = [] # angle: tower, initial abi = [] aci = [] u0a = [] # speed: numbered height, tower u1a = [] u2a = [] u0b = [] u1b = [] u2b = [] u0c = [] u1c = [] u2c = [] # grab data for i in range(0,np.shape(path)[0]): tempdata = pd.read_csv(path[i],header=1) if towe[i]=='a': ta = np.append(ta,tempdata.iloc[2:,0]) aai = np.append(aai,tempdata.iloc[2:,3]) u0a = np.append(u0a,tempdata.iloc[2:,12]) u1a = np.append(u1a,tempdata.iloc[2:,8]) u2a = np.append(u2a,tempdata.iloc[2:,2]) elif towe[i]=='b': tb = np.append(tb,tempdata.iloc[2:,0]) abi = np.append(abi,tempdata.iloc[2:,3]) u0b = np.append(u0b,tempdata.iloc[2:,12]) u1b = np.append(u1b,tempdata.iloc[2:,8]) u2b = np.append(u2b,tempdata.iloc[2:,2]) elif towe[i]=='c': tc = np.append(tc,tempdata.iloc[2:,0]) aci = np.append(aci,tempdata.iloc[2:,3]) u0c = np.append(u0c,tempdata.iloc[2:,12]) u1c = np.append(u1c,tempdata.iloc[2:,8]) u2c = np.append(u2c,tempdata.iloc[2:,2]) tas = [] # time: tower, seconds tbs = [] tcs = [] # put times in seconds for i in ta: try: tas.append(int(time.mktime(datetime.datetime.strptime(i,'%Y-%m-%d %H:%M:%S').timetuple()))) except: #because it does 24:00:00 and datetime doesn't like that tas.append(int(time.mktime(datetime.datetime.strptime(i[:-9],'%Y-%m-%d').timetuple()))+24*60*60) for i in tb: try: tbs.append(int(time.mktime(datetime.datetime.strptime(i,'%Y-%m-%d %H:%M:%S').timetuple()))) except: #because it does 24:00:00 and datetime doesn't like that tbs.append(int(time.mktime(datetime.datetime.strptime(i[:-9],'%Y-%m-%d').timetuple()))+24*60*60) for i in tc: try: tcs.append(int(time.mktime(datetime.datetime.strptime(i,'%Y-%m-%d 
%H:%M:%S').timetuple()))) except: #because it does 24:00:00 and datetime doesn't like that tcs.append(int(time.mktime(datetime.datetime.strptime(i[:-9],'%Y-%m-%d').timetuple()))+24*60*60) tas = np.asarray(tas) tbs = np.asarray(tbs) tcs = np.asarray(tcs) # unfortunately met a was installed such that it thinks true north is 7 degrees counterclockwise of reality FIXMETaANGLE = 7 aai = np.asarray(aai,dtype=np.float)+FIXMETaANGLE # correct this issue abi = np.asarray(abi,dtype=np.float) aci = np.asarray(aci,dtype=np.float) # change the directions such that they are all +-180 of dune orientation # instead of +360 clockwise of north aa = bearing_2_pm180_af0(aai,af0) # angle: tower (convention changed) ab = bearing_2_pm180_af0(abi,af0) ac = bearing_2_pm180_af0(aci,af0) u0a = np.asarray(u0a,dtype=np.float) u1a = np.asarray(u1a,dtype=np.float) u2a = np.asarray(u2a,dtype=np.float) u0b = np.asarray(u0b,dtype=np.float) u1b = np.asarray(u1b,dtype=np.float) u2b = np.asarray(u2b,dtype=np.float) u0c = np.asarray(u0c,dtype=np.float) u1c = np.asarray(u1c,dtype=np.float) u2c = np.asarray(u2c,dtype=np.float) z_m = np.asarray([2,5,10]) # heights of speeds, meters dts_m = tas[1]-tas[0] # time in seconds between wind speed measurements # + # function that takes all of the mutual times for the met towers # and creates a list of unique common values tsc = reduce(np.intersect1d, (tas, tbs, tcs)) # time: seconds, common aac = [] # angle: tower, common abc = [] acc = [] u0ac = [] # speed: numbered height, tower, common u1ac = [] u2ac = [] u0bc = [] u1bc = [] u2bc = [] u0cc = [] u1cc = [] u2cc = [] for i in range(0,np.shape(tsc)[0]): aac = np.append(aac,aa[tas==tsc[i]][0]) abc = np.append(abc,ab[tbs==tsc[i]][0]) acc = np.append(acc,ac[tcs==tsc[i]][0]) u0ac = np.append(u0ac,u0a[tas==tsc[i]][0]) u1ac = np.append(u1ac,u1a[tas==tsc[i]][0]) u2ac = np.append(u2ac,u2a[tas==tsc[i]][0]) u0bc = np.append(u0bc,u0b[tbs==tsc[i]][0]) u1bc = np.append(u1bc,u1b[tbs==tsc[i]][0]) u2bc = 
np.append(u2bc,u2b[tbs==tsc[i]][0]) u0cc = np.append(u0cc,u0c[tcs==tsc[i]][0]) u1cc = np.append(u1cc,u1c[tcs==tsc[i]][0]) u2cc = np.append(u2cc,u2c[tcs==tsc[i]][0]) # - # ### Lidar # + ############ ## LIDAR 2017 ## ############ # to read files months = range(1,12+1) days = range(1,31+1) part = range(1,3) z_l1 = np.asarray([10,13,16,22,28,36,38,46,60,77,300]) t_l1 = [] # icol 2 timestamp [s] d_l1 = [] # icol 1 DD/MM/YYYY HH:MM:SS gps_l1 = [] u1_l1 = [] # speed: numbered height, lidar unit number u2_l1 = [] u3_l1 = [] u4_l1 = [] u5_l1 = [] u6_l1 = [] u7_l1 = [] u8_l1 = [] u9_l1 = [] u10_l1 = [] u11_l1 = [] a1_l1i = [] # angle: numbered height, lidar unit number, initial convention # grab data for i in months: for j in days: temp_path = Path('lidar/lidar17_csvs/Wind_320@Y2017_M%02d_D%02d.CSV' % (i,j)) if temp_path.is_file(): tempdata = pd.read_csv(temp_path,delimiter=',',header=1) t_l1.extend(tempdata.iloc[:,2]) d_l1.extend(pd.to_datetime(tempdata.iloc[:,1]) - pd.Timedelta(hours=2)) # correct timezone from indiana to alamogordo gps_l1.extend(tempdata.iloc[:,10]) u11_l1.extend(tempdata.iloc[:,20]) u10_l1.extend(tempdata.iloc[:,23]) u9_l1.extend(tempdata.iloc[:,26]) u8_l1.extend(tempdata.iloc[:,29]) u7_l1.extend(tempdata.iloc[:,32]) u6_l1.extend(tempdata.iloc[:,35]) u5_l1.extend(tempdata.iloc[:,38]) u4_l1.extend(tempdata.iloc[:,41]) u3_l1.extend(tempdata.iloc[:,44]) u2_l1.extend(tempdata.iloc[:,47]) u1_l1.extend(tempdata.iloc[:,50]) a1_l1i.extend(tempdata.iloc[:,49]) for k in part: temp_path = Path('lidar/lidar17_csvs/Wind_320@Y2017_M%02d_D%02d-%d.CSV' % (i,j,k)) if temp_path.is_file(): tempdata = pd.read_csv(temp_path,delimiter=',',header=1) t_l1.extend(tempdata.iloc[:,2]) d_l1.extend(pd.to_datetime(tempdata.iloc[:,1]) - pd.Timedelta(hours=2)) #correct timezone from indiana to alamogordo gps_l1.extend(tempdata.iloc[:,10]) u11_l1.extend(tempdata.iloc[:,20]) u10_l1.extend(tempdata.iloc[:,23]) u9_l1.extend(tempdata.iloc[:,26]) u8_l1.extend(tempdata.iloc[:,29]) 
u7_l1.extend(tempdata.iloc[:,32]) u6_l1.extend(tempdata.iloc[:,35]) u5_l1.extend(tempdata.iloc[:,38]) u4_l1.extend(tempdata.iloc[:,41]) u3_l1.extend(tempdata.iloc[:,44]) u2_l1.extend(tempdata.iloc[:,47]) u1_l1.extend(tempdata.iloc[:,50]) a1_l1i.extend(tempdata.iloc[:,49]) t_l1 = np.asarray(t_l1) gps_l1 = np.asarray(gps_l1) u1_l1 = np.asarray(u1_l1) u2_l1 = np.asarray(u2_l1) u3_l1 = np.asarray(u3_l1) u4_l1 = np.asarray(u4_l1) u5_l1 = np.asarray(u5_l1) u6_l1 = np.asarray(u6_l1) u7_l1 = np.asarray(u7_l1) u8_l1 = np.asarray(u8_l1) u9_l1 = np.asarray(u9_l1) u10_l1 = np.asarray(u10_l1) u11_l1 = np.asarray(u11_l1) a1_l1i = np.asarray(a1_l1i) ##### # remove bad data bad_index = np.append(np.argwhere(u1_l1==9999.0),np.argwhere(gps_l1=='#N/A #N/A')) bad_index = np.append(bad_index,np.argwhere(u2_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u3_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u4_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u5_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u6_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u7_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u8_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u9_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u10_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(u11_l1==9999.0)) bad_index = np.append(bad_index,np.argwhere(np.isnan(u1_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u2_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u3_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u4_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u5_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u6_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u7_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u8_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u9_l1))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u10_l1))) bad_index = 
np.append(bad_index,np.argwhere(np.isnan(u11_l1))) t_l1 = np.delete(t_l1,bad_index) d_l1 = np.delete(d_l1,bad_index) u1_l1 = np.delete(u1_l1,bad_index) u2_l1 = np.delete(u2_l1,bad_index) u3_l1 = np.delete(u3_l1,bad_index) u4_l1 = np.delete(u4_l1,bad_index) u5_l1 = np.delete(u5_l1,bad_index) u6_l1 = np.delete(u6_l1,bad_index) u7_l1 = np.delete(u7_l1,bad_index) u8_l1 = np.delete(u8_l1,bad_index) u9_l1 = np.delete(u9_l1,bad_index) u10_l1 = np.delete(u10_l1,bad_index) u11_l1 = np.delete(u11_l1,bad_index) u_l1 = np.asarray([u1_l1,u2_l1,u3_l1,u4_l1,u5_l1,u6_l1,u7_l1,u8_l1,u9_l1,u10_l1,u11_l1]) a1_l1i = np.delete(a1_l1i,bad_index) a1_l1 = bearing_2_pm180_af0(a1_l1i,af0) #angle in analyzed convention # - print('total number of days of observations for lidar 1 is %d'%(len(t_l1)*17/60/60/24)) # + ############ ## LIDAR 2018 ## ############ # as above but for location b the next year # to read files months = range(1,12+1) days = range(1,31+1) part = range(1,3) z_l2 = np.asarray([10,14,20,28,38,55,78,300]) t_l2 = [] # icol 2 timestamp [s] d_l2 = [] #icol 1 DD/MM/YYYY HH:MM:SS gps_l2 = [] u1_l2 = [] u2_l2 = [] u3_l2 = [] u4_l2 = [] u5_l2 = [] u6_l2 = [] u7_l2 = [] u11_l2 = [] a1_l2i = [] for i in months: for j in days: temp_path = Path('lidar/lidar18_csvs/Wind_320@Y2018_M%02d_D%02d.CSV' % (i,j)) if temp_path.is_file(): tempdata = pd.read_csv(temp_path,delimiter=',',header=1) t_l2.extend(tempdata.iloc[:,2]) d_l2.extend(pd.to_datetime(tempdata.iloc[:,1]) - pd.Timedelta(hours=2)) #correct timezone from indiana to alamogordo gps_l2.extend(tempdata.iloc[:,10]) u11_l2.extend(tempdata.iloc[:,20]) u7_l2.extend(tempdata.iloc[:,32]) u6_l2.extend(tempdata.iloc[:,35]) u5_l2.extend(tempdata.iloc[:,38]) u4_l2.extend(tempdata.iloc[:,41]) u3_l2.extend(tempdata.iloc[:,44]) u2_l2.extend(tempdata.iloc[:,47]) u1_l2.extend(tempdata.iloc[:,50]) a1_l2i.extend(tempdata.iloc[:,49]) for k in part: temp_path = Path('lidar/lidar18_csvs/Wind_320@Y2018_M%02d_D%02d-%d.CSV' % (i,j,k)) if temp_path.is_file(): 
tempdata = pd.read_csv(temp_path,delimiter=',',header=1) t_l2.extend(tempdata.iloc[:,2]) d_l2.extend(pd.to_datetime(tempdata.iloc[:,1]) - pd.Timedelta(hours=2)) #correct timezone from indiana to alamogordo gps_l2.extend(tempdata.iloc[:,10]) u11_l2.extend(tempdata.iloc[:,20]) u7_l2.extend(tempdata.iloc[:,32]) u6_l2.extend(tempdata.iloc[:,35]) u5_l2.extend(tempdata.iloc[:,38]) u4_l2.extend(tempdata.iloc[:,41]) u3_l2.extend(tempdata.iloc[:,44]) u2_l2.extend(tempdata.iloc[:,47]) u1_l2.extend(tempdata.iloc[:,50]) a1_l2i.extend(tempdata.iloc[:,49]) t_l2 = np.asarray(t_l2) gps_l2 = np.asarray(gps_l2) u1_l2 = np.asarray(u1_l2) u2_l2 = np.asarray(u2_l2) u3_l2 = np.asarray(u3_l2) u4_l2 = np.asarray(u4_l2) u5_l2 = np.asarray(u5_l2) u6_l2 = np.asarray(u6_l2) u7_l2 = np.asarray(u7_l2) u11_l2 = np.asarray(u11_l2) a1_l2i = np.asarray(a1_l2i) ##### bad_index = np.append(np.argwhere(u1_l2==9999.0),np.argwhere(gps_l2=='#N/A #N/A')) bad_index = np.append(bad_index,np.argwhere(u2_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(u3_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(u4_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(u5_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(u6_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(u7_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(u11_l2==9999.0)) bad_index = np.append(bad_index,np.argwhere(np.isnan(u1_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u2_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u3_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u4_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u5_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u6_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u7_l2))) bad_index = np.append(bad_index,np.argwhere(np.isnan(u11_l2))) t_l2 = np.delete(t_l2,bad_index) d_l2 = np.delete(d_l2,bad_index) u1_l2 = np.delete(u1_l2,bad_index) u2_l2 = np.delete(u2_l2,bad_index) u3_l2 = 
np.delete(u3_l2,bad_index) u4_l2 = np.delete(u4_l2,bad_index) u5_l2 = np.delete(u5_l2,bad_index) u6_l2 = np.delete(u6_l2,bad_index) u7_l2 = np.delete(u7_l2,bad_index) u11_l2 = np.delete(u11_l2,bad_index) u_l2 = np.asarray([u1_l2,u2_l2,u3_l2,u4_l2,u5_l2,u6_l2,u7_l2,u11_l2]) a1_l2i = np.delete(a1_l2i,bad_index) a1_l2 = bearing_2_pm180_af0(a1_l2i,af0) # - print('total number of days of observations for lidar 2 is %d'%(len(t_l2)*17/60/60/24)) # # Analysis # ## DEM # + # measure topography variability (similar to roughness) # done on zr_jun10, almost identical for each dem temp = np.copy(zr_jun10) temp[np.isnan(temp)] = 0 # to allow function below stdr_jun10 = window_std(temp,stdwidth) # make a map of local (100m^2 tile) standard deviation del temp stdr_jun10[stdr_jun10>10] = np.nan # remove spuriously large values stdr_jun10[np.isnan(zr_jun10)] = np.nan # remove values outside of map stdr_jun10s = np.empty([np.shape(yr)[0],np.shape(xr)[0]+uwimax]) stdr_jun10s[:] = np.nan # map: stdr: standard deviation, r: rotated, jun10: month observed, s: x_r normed to distance from boundary # shifts each transect in xr such that they all pass through the upwind boundary at a common array index # this means that for a common index you're equidistant from the boundary # this frame of reference is called sr, and is useful for span-wise averages for i in np.arange(np.shape(yr)[0]): templ = LineString([(np.min(xr),yr[i]),(np.max(xr),yr[i])]) tempp = templ.intersection(upwind_boundary_r) try: uwi = np.argmin(np.abs(xr-tempp.coords[0][0])) stdr_jun10s[i,uwimax-uwi:np.shape(xr)[0]+uwimax-uwi] = stdr_jun10[i,:] except: continue del stdr_jun10 with open('DEMs/stdr_jun10s.pkl', 'wb') as file: pickle.dump(stdr_jun10s, file) del stdr_jun10s # - # create a mean transect of topography variability with open('DEMs/stdr_jun10s.pkl', 'rb') as file: stdr_jun10s = pickle.load(file) stdr_jun10savg = np.nanmean(stdr_jun10s,axis=0) # span-wise average sr = np.arange(np.shape(stdr_jun10s)[1])-uwimax # along 
transect distance (grid spacing is 1 m) del stdr_jun10s # unbias the dem differences and make sediment flux maps dzdtcs = demcorrect(dems,dempairs,demdts) for i in range(len(dempairs)): qs = demflux(dems[dempairs[i][1]],dems[dempairs[i][0]],dzdtcs[i],xr) with open('DEMs/qs%d.pkl'%i, 'wb') as file: pickle.dump(qs, file) del dzdtcs, qs # change flux maps in xr,yr to sr,yr for i in range(len(dempairs)): with open('DEMs/qs%d.pkl'%i, 'rb') as file: qs = pickle.load(file) qss = np.empty([np.shape(yr)[0],np.shape(xr)[0]+uwimax]) qss[:] = np.nan for j in np.arange(np.shape(yr)[0]): templ = LineString([(np.min(xr),yr[j]),(np.max(xr),yr[j])]) tempp = templ.intersection(upwind_boundary_r) try: uwi = np.argmin(np.abs(xr-tempp.coords[0][0])) qss[j,uwimax-uwi:np.shape(xr)[0]+uwimax-uwi-1] = qs[j,:] except: continue with open('DEMs/qs%ds.pkl'%i, 'wb') as file: pickle.dump(qss, file) del qs, qss # + # create mean transects of sediment flux and variability qavg = [] qp25 = [] qp75 = [] for i in range(len(dempairs)): with open('DEMs/qs%ds.pkl'%i, 'rb') as file: qss = pickle.load(file) qavg.append(np.nanmean(qss,axis=0)) qp25.append(np.nanpercentile(qss,25,axis=0)) qp75.append(np.nanpercentile(qss,75,axis=0)) del qss # + # get the variability in dune direction from each map ds = gdal.Open(jan09, gdal.GA_ReadOnly) rb = ds.GetRasterBand(1) z_jan09 = np.flip(rb.ReadAsArray(),axis=0) ds = gdal.Open(jun10, gdal.GA_ReadOnly) rb = ds.GetRasterBand(1) z_jun10 = np.flip(rb.ReadAsArray(),axis=0) pdd_jan09,ppd_bins = angledist(z_jan09,x,y,5,df_domain_o) pdd_sep09,_ = angledist(z_sep09,x,y,5,df_domain_o) pdd_jun10,_ = angledist(z_jun10,x,y,5,df_domain_o) del z_jan09, z_jun10 # - # ## In-situ # + # get aligned concurrent winds from the met towers # winds within adelta (15 degrees) of the dune direction # and within adelta of the upwind wind direction # 2 m elevation u0aca = u0ac[(aac<1*adelta)&(aac>-1*adelta)& (abc<2*adelta)&(abc>-2*adelta)& (acc<3*adelta)&(acc>-3*adelta)] # speed: numbered 
height, tower, common, aligned u0bca = u0bc[(aac<1*adelta)&(aac>-1*adelta)& (abc<2*adelta)&(abc>-2*adelta)& (acc<3*adelta)&(acc>-3*adelta)] u0cca = u0cc[(aac<1*adelta)&(aac>-1*adelta)& (abc<2*adelta)&(abc>-2*adelta)& (acc<3*adelta)&(acc>-3*adelta)] # 10 m elevation u2aca = u2ac[(aac<1*adelta)&(aac>-1*adelta)& (abc<2*adelta)&(abc>-2*adelta)& (acc<3*adelta)&(acc>-3*adelta)] # speed: numbered height, tower, common, aligned u2bca = u2bc[(aac<1*adelta)&(aac>-1*adelta)& (abc<2*adelta)&(abc>-2*adelta)& (acc<3*adelta)&(acc>-3*adelta)] u2cca = u2cc[(aac<1*adelta)&(aac>-1*adelta)& (abc<2*adelta)&(abc>-2*adelta)& (acc<3*adelta)&(acc>-3*adelta)] # get fluxes from some of these winds q2a = flux(u0a,z_m[2],z00) # assumed local roughness of z00 q2bca_av = np.mean(flux(u2bca,z_m[2],z00)) q2cca_av = np.mean(flux(u2cca,z_m[2],z00)) # + # get the probability distribution of aligned wind speeds ubins = np.linspace(0,13,14) ubinmids = ubins[1:] - np.diff(ubins)[0]/2 u2acnt,_ = np.histogram(u2aca,bins=ubins) pu2a = u2acnt/np.sum(u2acnt) fig = plt.gcf() plt.subplot(111) plt.plot(ubinmids,pu2a,color='r') plt.yscale('log') plt.xlim(0,13) plt.ylabel('$P(U^a_{10})$',fontsize=fs*2) plt.xlabel('$U^a_{10}$ (m/s)',fontsize=fs*2) plt.xticks(fontsize=fs*2) plt.yticks(fontsize=fs*2) fig.set_size_inches(6, 6, forward=True) # + # find the mean and variability in wind profiles for lidar during 10 m aligned winds u_l1a = u_l1[:,(a1_l1<1*adelta)&(a1_l1>-1*adelta)] uavl1 = np.mean(u_l1a[:-1],axis=1)/np.mean(u_l1a[-2]) u25l1 = np.percentile(u_l1a[:-1],25,axis=1)/np.mean(u_l1a[-2]) u75l1 = np.percentile(u_l1a[:-1],75,axis=1)/np.mean(u_l1a[-2]) u_l2a = u_l2[:,(a1_l2<1*adelta)&(a1_l2>-1*adelta)] uavl2 = np.mean(u_l2a[:-1],axis=1)/np.mean(u_l2a[-2]) u25l2 = np.percentile(u_l2a[:-1],25,axis=1)/np.mean(u_l2a[-2]) u75l2 = np.percentile(u_l2a[:-1],75,axis=1)/np.mean(u_l2a[-2]) # find the mean and variablity in 300 m wind speed for binned 10 m aligned wind speeds # for each lidar u1_la_bins = np.linspace(0,15,16) 
# bin centres for conditioning the 300 m lidar speed on the 10 m speed
u1_la_binned = u1_la_bins[:-1]+np.diff(u1_la_bins)/2
u11_l1a_binnedav = [] # mean / 25th / 75th percentile of the top-gate speed, per bin
u11_l1a_binned25 = []
u11_l1a_binned75 = []
u11_l2a_binnedav = []
u11_l2a_binned25 = []
u11_l2a_binned75 = []
for i in np.arange(0,np.shape(u1_la_bins)[0]-1):
    # aligned samples whose lowest-gate (10 m) speed falls in bin i, lidar 1
    tempind_l1 = np.argwhere((u_l1a[0]>u1_la_bins[i])&(u_l1a[0]<=u1_la_bins[i+1]))
    u11_l1a_binnedav = np.append(u11_l1a_binnedav,np.mean(u_l1a[-1,tempind_l1]))
    u11_l1a_binned25 = np.append(u11_l1a_binned25,np.nanpercentile(u_l1a[-1,tempind_l1],25))
    u11_l1a_binned75 = np.append(u11_l1a_binned75,np.nanpercentile(u_l1a[-1,tempind_l1],75))
    # same conditioning for lidar 2
    tempind_l2 = np.argwhere((u_l2a[0]>u1_la_bins[i])&(u_l2a[0]<=u1_la_bins[i+1]))
    u11_l2a_binnedav = np.append(u11_l2a_binnedav,np.mean(u_l2a[-1,tempind_l2]))
    u11_l2a_binned25 = np.append(u11_l2a_binned25,np.nanpercentile(u_l2a[-1,tempind_l2],25))
    u11_l2a_binned75 = np.append(u11_l2a_binned75,np.nanpercentile(u_l2a[-1,tempind_l2],75))

# +
# find the probability distribution of fluxes from the 5 wind instruments
# for given wind directions
q1_l1 = flux(u_l1[0],z_l1[0],z00)
q1_l2 = flux(u_l2[0],z_l2[0],z00)
q2a = flux(u2a,z_m[2],z00)
q2b = flux(u2b,z_m[2],z00)
q2c = flux(u2c,z_m[2],z00)
da = 5 # direction bin width, degrees
mAf = np.arange(-180,180+da,da)
mAmidf = mAf[1:]-da/2
pq1_l1 = np.empty(len(mAmidf))
pq1_l2 = np.empty(len(mAmidf))
pq2a = np.empty(len(mAmidf))
pq2b = np.empty(len(mAmidf))
pq2c = np.empty(len(mAmidf))
# total flux per direction bin, for each instrument
for i in range(len(mAmidf)):
    pq1_l1[i] = np.sum(q1_l1[(a1_l1>=mAf[i])&(a1_l1<mAf[i+1])])
    pq1_l2[i] = np.sum(q1_l2[(a1_l2>=mAf[i])&(a1_l2<mAf[i+1])])
    pq2a[i] = np.sum(q2a[(aa>=mAf[i])&(aa<mAf[i+1])])
    pq2b[i] = np.sum(q2b[(ab>=mAf[i])&(ab<mAf[i+1])])
    pq2c[i] = np.sum(q2c[(ac>=mAf[i])&(ac<mAf[i+1])])
# normalize each rose to a probability distribution
pq1_l1 = pq1_l1/np.sum(q1_l1)
pq1_l2 = pq1_l2/np.sum(q1_l2)
pq2a = pq2a/np.sum(q2a)
pq2b = pq2b/np.sum(q2b)
pq2c = pq2c/np.sum(q2c)
# -

# ## Theory

# +
# get a theoretical transect in wind speed and flux
# because 1) distance travelled depends on speed,
# 2) 10 m speeds are non-linearly related to forcing
# 3) sediment flux is a threshold phenomena,
# and 4) speeds occur as a distribution in reality,
# care must be taken to make sure this transect is equivalent to reality

# find the relevant geostrophic forcing wind for each value in the aligned 10 m
# winds from the upwind met tower distribution
G_t = np.empty_like(ubinmids)
for i in range(len(G_t)):
    G_t[i] = gfromu(ubinmids[i],z_m[2],z00)

a0_t = a(G_t,z_m[2],z00,Z,L0) # upwind damping for the BCs
A0_t = G_t/(1-1j*a0_t/f) # upwind equilibrium wind vector
a1_t = a(G_t,z_m[2],z01,Z,L0) # downwind damping for the BCs
e1_t = np.exp(-(a1_t[:,np.newaxis]+1j*f)*t_t[np.newaxis,:]) # decay downwind of boundary
B1_t = G_t/(1-1j*a1_t/f) # downwind equilibrium for the BCs
A1_t = (A0_t[:,np.newaxis]-B1_t[:,np.newaxis])*e1_t+B1_t[:,np.newaxis] # downwind wind vector evolution
U1_t = np.abs(A1_t) # downwind wind speed evolution
T1_t = np.arctan2(A1_t.imag,A1_t.real) # upwind equilibrium wind angle
# downwind wind angle evolution relative to upwind
dT1_t = (T1_t - np.arctan2(A0_t[:,np.newaxis].imag,A0_t[:,np.newaxis].real))*180/np.pi
S1_t = t_t*U1_t # distance travelled in lagrangian f.o.r. from boundary
X1_t = S1_t*np.cos(T1_t) # distanced travelled normal to boundary
Q1_t = flux(U1_t,z_m[2],z00) # sediment flux from winds

# put this range of forcing conditions together such that they occur with the same
# likelihood as reality
tempu = U1_t*pu2a[:,np.newaxis] # wind speeds scaled by likelihood of being measured at met_a
tempq = Q1_t*pu2a[:,np.newaxis] # sediment flux scaled as above

# create transects
X_tbins = np.linspace(0,np.ceil(np.max(sr)/100)*100,int((np.ceil(np.max(sr)/100)))+1)
X_t = X_tbins[1:] - np.diff(X_tbins)[0]/2
U_t = np.empty([np.shape(X_t)[0],np.shape(tempu)[0]])
Q_t = np.empty([np.shape(X_t)[0],np.shape(tempu)[0]])
# for a given distance bin downwind, get the mean of the speeds and fluxes for each forcing condition
for i in range(len(X_t)):
    for j in range(len(tempu)):
        U_t[i,j] = np.mean(tempu[j][(X1_t[j]>X_tbins[i])&(X1_t[j]<=X_tbins[i+1])])
        Q_t[i,j] = np.mean(tempq[j][(X1_t[j]>X_tbins[i])&(X1_t[j]<=X_tbins[i+1])])
# get the net speed and flux transect
U_t = np.sum(U_t,axis=1)
Q_t = np.sum(Q_t,axis=1)

# +
# each forcing scenario in color and the net transect in black
fig = plt.gcf()
plt.subplot(211)
for i in range(len(U1_t)):
    plt.plot(X1_t[i]*m2km,U1_t[i])
plt.plot(X_t*m2km,U_t,'k')
plt.xlim(0,np.max(X1_t*m2km))
plt.ylim(0,np.max(U1_t))
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs*2)
plt.xticks([],[])
plt.yticks(fontsize=fs*2)
plt.subplot(212)
for i in range(len(U1_t)):
    plt.plot(X1_t[i]*m2km,Q1_t[i]/s2y)
plt.plot(X_t*m2km,Q_t/s2y,'k')
plt.xlim(0,np.max(X1_t*m2km))
plt.ylim(0,np.max(Q1_t)/s2y)
plt.xlabel('distance (km)',fontsize=fs*2)
plt.ylabel('$q_{s}$ (m$^{2}$/yr)',fontsize=fs*2)
plt.xticks(fontsize=fs*2)
plt.yticks(fontsize=fs*2)
fig.set_size_inches(6, 6, forward=True)

# +
# get the range in wind speeds measured at 2 m for each met tower across a range of forcing conditions
Ng = 10 # number of forcing steps
G = np.linspace(1,50,Ng) # the geostrophic flow speed forcing

# theory as above
a0 = a(G,z_m[0],z00,Z,L0)
A0 = G/(1-1j*a0/f)
a1 = a(G,z_m[0],z01,Z,L0)
e1 = np.exp(-(a1[:,np.newaxis]+1j*f)*t_t[np.newaxis,:])
B1 = G/(1-1j*a1/f)
A = (A0[:,np.newaxis]-B1[:,np.newaxis])*e1+B1[:,np.newaxis]
U = np.abs(A)
T = np.arctan2(A.imag,A.real)
S = t_t*U
X = S*np.cos(T)

# 2 m wind speed for each met tower because s_X_o is the distance from the boundary
U0a = U[np.arange(0,Ng),np.argmin(np.abs(X-s_a_o),axis=-1)]
U0b = U[np.arange(0,Ng),np.argmin(np.abs(X-s_b_o),axis=-1)]
U0c = U[np.arange(0,Ng),np.argmin(np.abs(X-s_c_o),axis=-1)]

# +
# for a fixed geostrophic condition, report the saturation values for the chosen
# White Sands roughnesses
G = 20 # fixed geostrophic wind speed
z = 10 # reference elevation in each scenario
a0 = a(G,z,z00,Z,L0)
A0 = G/(1-1j*a0/f)
a1 = a(G,z,z01,Z,L0)
e1 = np.exp(-(a1+1j*f)*t_t)
B1 = G/(1-1j*a1/f)
A1 = (A0-B1)*e1+B1
U1 = np.abs(A1)
T1 = np.arctan2(A1.imag,A1.real)
S1 = t_t*U1
X1 = S1*np.cos(T1)
Usat = np.abs(B1/A0) # saturated speed ratio downwind/upwind
Asat = np.arctan2(f,a0) - np.arctan2(f,a1) # saturated veering angle
U1s = np.abs((U1-np.abs(B1))/np.abs(B1)) # fractional distance from equilibrium
envpi = np.argwhere(U1s<eps)
indb = np.min(envpi) # first index within eps of equilibrium
LSb = X1[indb]
print('Saturated wind speed is %.4f times the outside wind'%Usat)
print('Saturated wind direction is %.1f degrees to the North of the outside wind'%(Asat*180/np.pi))
print('Saturation length is %.1f km downwind of the edge'%(LSb*m2km))

# +
# get theoretical speed and angle transects for different pairs of roughnesses
# as well as saturated values for the angle and speed changes, and the distance required for saturation
# also find the amount of net erosion and deposition we can expect some distance
# from the boundary between roughnesses
Nz0 = 100 # root of the number of difference roughnesses scenarios
G = 20 # fixed geostrophic wind speed
z = 10 # reference elevation in each scenario
z0_0 = np.logspace(-5,0,Nz0) # roughness length of upwind
z0_1 = z0_0 # roughness length of downwind
Z0_0,Z0_1 = np.meshgrid(z0_0,z0_1)
I,J = np.meshgrid(np.arange(0,Nz0),np.arange(0,Nz0))
Z0r = Z0_0/Z0_1 # roughness ratio of each scenario
z0_0wsi = np.argmin(np.abs(z0_0-z00))
# indices of the closest scenario to the white sands theory
z0_1wsi = np.argmin(np.abs(z0_1-z01)) # same as above

# theory as above, now over the 2-D grid of (upwind, downwind) roughness pairs
a0 = a(G,z,Z0_0,Z,L0)
A0 = G/(1-1j*a0/f)
a1 = a(G,z,Z0_1,Z,L0)
e1 = np.exp(-(a1[:,:,np.newaxis]+1j*f)*t_t[np.newaxis,np.newaxis,:])
B1 = G/(1-1j*a1/f)
A1 = (A0[:,:,np.newaxis]-B1[:,:,np.newaxis])*e1+B1[:,:,np.newaxis]
U1 = np.abs(A1)
T1 = np.arctan2(A1.imag,A1.real)
dT = (T1 - np.arctan2(A0[:,:,np.newaxis].imag,A0[:,:,np.newaxis].real))*180/np.pi
S1 = t_t*U1
X1 = S1*np.cos(T1)
Q1 = flux(U1,z,z00)

# do exner to get the deposition rate
# get the rate at a fixed distance downwind (here the along-wind length of white sands at the met transect)
DZDT = -np.diff(Q1,axis=-1)/np.diff(X1,axis=-1)/phi
DZDTr = DZDT[J,I,np.argmin(np.abs(X1-s_r_m),axis=-1)]
# this rate needs to be scaled for intermittency
# done by scaling such that the white sands roughness pair scenario has the same flux at the boundary
# as the theoretical transect incorporating the measured likelihood of winds at met a
factor = Q_t[0]/Q1[z0_1wsi,z0_0wsi][0]
DZDTr = DZDTr*factor
DZDTr[DZDTr==0] = np.nan # get rid of zero deposition rate values for plotting

# saturated ratio of wind speed and veering angle between upwind and downwind
Usat = np.abs(B1/A0)
Asat = np.arctan2(f,a0) - np.arctan2(f,a1)

print('The intermittency factor is %.4f'%factor)

# +
# get the saturation distance
# defined as the closest distance normal to the boundary where the wind speed is
# within 1% of equilibrium with downwind conditions
U1s = np.abs((U1-np.abs(B1[:,:,np.newaxis]))/np.abs(B1[:,:,np.newaxis]))
LSb = np.zeros_like(Z0r)
for i in np.arange(0,np.shape(U1s)[0]):
    for j in np.arange(0,np.shape(U1s)[1]):
        envpi = np.argwhere(U1s[i,j,:]<eps)
        indb = np.min(envpi)
        LSb[i,j] = X1[i,j,indb] # find distance associated with convergence to (1+-eps)*Uinf
# -

# ### LES and Deposition comparison data

# +
# to show profiles of damping frequency
# do examples from 1) inverted from wang & anderson LES,
# inverted from the upwind lidar profile,
# and the values from the white sands scenario (momen & bou-zeid method)
tempu = np.mean(u_l1a[:-1],axis=1)

temp_path = 'wsnm3.csv' # grab data taken from plot in wang & anderson
# FIX: header=-1 is not accepted by modern pandas; header=None is the
# documented way to read a file with no header row
tempdata = pd.read_csv(temp_path,delimiter=',',header=None)
uhat_les = np.asarray(tempdata.iloc[:,0])
zhat_les = np.asarray(tempdata.iloc[:,1])
H_les = 100
us_les = 1
z_les = zhat_les*H_les
u_les = uhat_les*us_les
g_les = 20 #u_les[-1]
g_l = gfromu(tempu[-1],z_l1[-2],z00)
g_mbz = g_l
# a_l = f*((g_l/tempu)**2-1)**0.5
# a_les = f*((g_les/u_les)**2-1)**0.5
a_l = f*(g_l/tempu-1)
a_les = f*(g_les/u_les-1)
al0_mbz = a(g_mbz,z_t,z00,Z,L0)
al1_mbz = a(g_mbz,z_t,z01,Z,L0)

# +
# grab the OSL measured deposition rates at white sands from kocurek et al 2007
dzdtr_k07 = [0.5,0.9,2.5,1.4,1.1,1.4]
dzdtr_k07av = np.mean(dzdtr_k07)
dzdtr_k0725 = np.percentile(dzdtr_k07,25)
dzdtr_k0775 = np.percentile(dzdtr_k07,75)
dzdtr_k070 = np.percentile(dzdtr_k07,0)
dzdtr_k07100 = np.percentile(dzdtr_k07,100)
# project the OSL core site onto the upwind boundary to get its downwind distance
x_k07, y_k07 = transform(p_lonlat, p_lidar, -106.265287, 32.820668)
tempx2 = np.min(upwindVertups_o[:,0])
tempy2 = y_k07 + (tempx2-x_k07)*np.tan(af0*np.pi/180)
tempp3 = LineString([(x_k07,y_k07),(tempx2,tempy2)]).intersection(upwind_boundary_o)
s_k07 = LineString([(tempp3.coords[0][0],tempp3.coords[0][1]),(x_k07, y_k07)]).length
s_k07 # bare expression: notebook cell display of the distance
# theoretical deposition rate at the core-site distance, intermittency scaled
DZDTr_k07 = DZDT[J,I,np.argmin(np.abs(X1-s_k07),axis=-1)]
DZDTr_k07 = DZDTr_k07*factor
DZDTr_k07[DZDTr_k07==0] = np.nan # get rid of zero deposition rate values for plotting
# -

# ### Data-driven alpha theory

# +
def a_calc(z):
    # power-law fit of the lidar-inverted damping frequency profile a_l(z_l1),
    # evaluated at height z; refits on every call (cheap: one deg-1 polyfit)
    az0,az1 = np.polyfit(np.log10(z_l1[:-1]),np.log10(a_l),deg=1)
    return 10**az1*z**az0

def a_les_calc(z):
    # interpolate the LES-inverted damping profile, padded flat to 1 m and 100 m
    z_new = np.hstack((1,z_les,100))
    a_new = np.hstack((a_les[0],a_les,a_les[-1]))
    # renamed from 'f' to avoid shadowing the module-level Coriolis parameter f
    interp_fn = interp1d(z_new,a_new)
    return interp_fn(z)

# +
# compare the three damping-frequency profiles (LES, lidar, momen & bou-zeid)
z_temp = np.logspace(0,2,100)
fig = plt.gcf()
plt.subplot(121)
plt.plot(a_les,z_les,'-o')
plt.plot(a_les_calc(z_temp),z_temp)
plt.plot(a_l,z_l1[:-1],'-o')
plt.plot(a_calc(z_temp),z_temp)
plt.plot(al0_mbz,z_t)
plt.plot(al1_mbz,z_t)
plt.yscale('log')
plt.xscale('log')
plt.xlabel('$\\alpha$ (1/s)',fontsize=fs*2)
plt.ylabel('$z$ (m)',fontsize=fs*2)
plt.xticks(fontsize=fs*2)
plt.yticks(fontsize=fs*2)
# right panel: same profiles normalized by their geostrophic speeds
plt.subplot(122)
plt.plot(a_les/g_les,z_les,'-o')
plt.plot(a_les_calc(z_temp)/g_les,z_temp)
plt.plot(a_l/g_l,z_l1[:-1],'-o')
plt.plot(a_calc(z_temp)/g_l,z_temp)
plt.plot(al0_mbz/g_mbz,z_t)
plt.plot(al1_mbz/g_mbz,z_t)
plt.yscale('log')
plt.xscale('log')
plt.xlabel('$\\alpha/G$ (1/m)',fontsize=fs*2)
plt.ylabel('$z$ (m)',fontsize=fs*2)
plt.xticks(fontsize=fs*2)
plt.yticks(fontsize=fs*2)
fig.set_size_inches(12, 6, forward=True)

# +
# NOTE(review): this rebinds T and t_t at module level, clobbering the angle
# array T and the earlier time grid -- later cells use these new values
T = 60*60*80 # total model time
t_t = np.logspace(0,np.log10(T),Nt) # the time step grid

# get a theoretical transect in wind speed and flux
# because 1) distance travelled depends on speed,
# 2) 10 m speeds are non-linearly related to forcing
# 3) sediment flux is a threshold phenomena,
# and 4) speeds occur as a distribution in reality,
# care must be taken to make sure this transect is equivalent to reality

# find the relevant geostrophic forcing wind for each value in the aligned 10 m
# winds from the upwind met tower distribution
G_t = np.empty_like(ubinmids)
for i in range(len(G_t)):
    G_t[i] = gfromu(ubinmids[i],z_m[2],z00)

# "_DD": data-driven damping, scaled linearly in G from the inverted profiles
a0_t_DD = a_calc(z_m[2])/g_l*G_t # upwind damping for the BCs
A0_t_DD = G_t/(1-1j*a0_t_DD/f) # upwind equilibrium wind vector
a1_t_DD = a_les_calc(z_m[2])/g_les*G_t # downwind damping for the BCs
e1_t_DD = np.exp(-(a1_t_DD[:,np.newaxis]+1j*f)*t_t[np.newaxis,:]) # decay downwind of boundary
B1_t_DD = G_t/(1-1j*a1_t_DD/f) # downwind equilibrium for the BCs
A1_t_DD = (A0_t_DD[:,np.newaxis]-B1_t_DD[:,np.newaxis])*e1_t_DD+B1_t_DD[:,np.newaxis] # downwind wind vector evolution
U1_t_DD = np.abs(A1_t_DD) # downwind wind speed evolution
T1_t_DD = np.arctan2(A1_t_DD.imag,A1_t_DD.real) # upwind equilibrium wind angle
# downwind wind angle evolution relative to upwind
dT1_t_DD = (T1_t_DD - np.arctan2(A0_t_DD[:,np.newaxis].imag,A0_t_DD[:,np.newaxis].real))*180/np.pi
S1_t_DD = t_t*U1_t_DD # distance travelled in lagrangian f.o.r. from boundary
X1_t_DD = S1_t_DD*np.cos(T1_t_DD) # distanced travelled normal to boundary
Q1_t_DD = flux(U1_t_DD,z_m[2],z00) # sediment flux from winds

# put this range of forcing conditions together such that they occur with the same
# likelihood as reality
tempu_DD = U1_t_DD*pu2a[:,np.newaxis] # wind speeds scaled by likelihood of being measured at met_a
tempq_DD = Q1_t_DD*pu2a[:,np.newaxis] # sediment flux scaled as above

# create transects
X_tbins = np.linspace(0,np.ceil(np.max(sr)/100)*100,int((np.ceil(np.max(sr)/100)))+1)
X_t = X_tbins[1:] - np.diff(X_tbins)[0]/2
U_t_DD = np.empty([np.shape(X_t)[0],np.shape(tempu_DD)[0]])
Q_t_DD = np.empty([np.shape(X_t)[0],np.shape(tempu_DD)[0]])
# for a given distance bin downwind, get the mean of the speeds and fluxes for each forcing condition
for i in range(len(X_t)):
    for j in range(len(tempu_DD)):
        U_t_DD[i,j] = np.mean(tempu_DD[j][(X1_t_DD[j]>X_tbins[i])&(X1_t_DD[j]<=X_tbins[i+1])])
        Q_t_DD[i,j] = np.mean(tempq_DD[j][(X1_t_DD[j]>X_tbins[i])&(X1_t_DD[j]<=X_tbins[i+1])])
# get the net speed and flux transect
U_t_DD = np.sum(U_t_DD,axis=1)
Q_t_DD = np.sum(Q_t_DD,axis=1)

# +
# each forcing scenario in color and the net transect in black
fig = plt.gcf()
plt.subplot(211)
for i in range(len(U1_t_DD)):
    plt.plot(X1_t_DD[i]*m2km,U1_t_DD[i])
plt.plot(X_t*m2km,U_t_DD,'k')
plt.xlim(0,np.max(X1_t_DD*m2km))
plt.ylim(0,np.max(U1_t_DD))
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs*2)
plt.xticks([],[])
plt.yticks(fontsize=fs*2)
plt.subplot(212)
for i in range(len(U1_t_DD)):
    plt.plot(X1_t_DD[i]*m2km,Q1_t_DD[i]/s2y)
plt.plot(X_t*m2km,Q_t_DD/s2y,'k')
plt.xlim(0,np.max(X1_t_DD*m2km))
plt.ylim(0,np.max(Q1_t_DD)/s2y)
plt.xlabel('distance (km)',fontsize=fs*2)
plt.ylabel('$q_{s}$ (m$^{2}$/yr)',fontsize=fs*2)
plt.xticks(fontsize=fs*2)
plt.yticks(fontsize=fs*2)
fig.set_size_inches(6, 6, forward=True)

# +
# get the range in wind speeds measured at 2 m for each met tower across a range of forcing conditions
Ng = 10 # number of forcing steps
G = np.linspace(1,50,Ng) # the geostrophic flow speed forcing

# theory as above
a0_DD = a_calc(z_m[0])/g_l*G # upwind damping for the BCs
A0_DD = G/(1-1j*a0_DD/f) # upwind equilibrium wind vector
a1_DD = a_les_calc(z_m[0])/g_les*G # downwind damping for the BCs
e1_DD = np.exp(-(a1_DD[:,np.newaxis]+1j*f)*t_t[np.newaxis,:])
B1_DD = G/(1-1j*a1_DD/f)
A_DD = (A0_DD[:,np.newaxis]-B1_DD[:,np.newaxis])*e1_DD+B1_DD[:,np.newaxis]
U_DD = np.abs(A_DD)
T_DD = np.arctan2(A_DD.imag,A_DD.real)
S_DD = t_t*U_DD
X_DD = S_DD*np.cos(T_DD)

# 2 m wind speed for each met tower because s_X_o is the distance from the boundary
U0a_DD = U_DD[np.arange(0,Ng),np.argmin(np.abs(X_DD-s_a_o),axis=-1)]
U0b_DD = U_DD[np.arange(0,Ng),np.argmin(np.abs(X_DD-s_b_o),axis=-1)]
U0c_DD = U_DD[np.arange(0,Ng),np.argmin(np.abs(X_DD-s_c_o),axis=-1)]
# -

# tower speed differences: original theory (grey) vs data-driven (black)
plt.plot(U0a-U0b,U0a-U0c,'grey',lw=lw*1.5)
plt.plot(U0a_DD-U0b_DD,U0a_DD-U0c_DD,'k',lw=lw*1.5)
plt.axis('square')
plt.xlim(-6,6)
plt.ylim(-6,6)

# +
# summary figure: variability, speed and flux transects with data overlaid
# NOTE(review): S and A here clobber the earlier theory arrays S and A; i is
# a fixed transect row index, reused by later loops
lw = 0.5
S = 40
A = 0.5
i = 4800
templ = LineString([(np.min(xr),yr[i]),(np.max(xr),yr[i])])
tempp = templ.intersection(upwind_boundary_r)
uwi = np.argmin(np.abs(xr-tempp.coords[0][0]))

fig = plt.gcf()
ax0 = plt.subplot(311)
plt.plot(sr*m2km,stdr_jun10savg,c='y',lw=lw*1.5,label='Jun10')
plt.legend(frameon=False,fontsize=fs*2)
plt.ylabel('$\\sigma_{\\eta}$ (m)',fontsize=fs*2)
plt.xticks([],[])
plt.yticks(fontsize=fs*2)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
ax1 = plt.subplot(312)
plt.scatter([s_a_o*m2km,s_b_o*m2km,s_c_o*m2km],[np.mean(u2aca),np.mean(u2bca),np.mean(u2cca)],c='c',marker='v',s=S,alpha=A,lw=0,label='Met')
plt.plot([s_a_o*m2km,s_a_o*m2km],[np.percentile(u2aca,25),np.percentile(u2aca,75)],c='c',alpha=A,lw=lw)
plt.plot([s_b_o*m2km,s_b_o*m2km],[np.percentile(u2bca,25),np.percentile(u2bca,75)],c='c',alpha=A,lw=lw)
plt.plot([s_c_o*m2km,s_c_o*m2km],[np.percentile(u2cca,25),np.percentile(u2cca,75)],c='c',alpha=A,lw=lw) plt.plot(X_t*m2km,U_t,'grey',lw=lw*1.5,label='Theory') plt.plot(X_t*m2km,U_t_DD,'k',lw=lw*1.5,label='Theory DD') plt.plot([(xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,X_t[0]*m2km],[U_t[0],U_t[0]],'grey',lw=lw*1.5) plt.legend(frameon=False,fontsize=fs*2) plt.ylabel('$U_{10}$ (m/s)',fontsize=fs*2) plt.xticks([],[]) plt.yticks(fontsize=fs*2) plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10) ax1.yaxis.set_label_position("right") ax1.yaxis.tick_right() ax2 = plt.subplot(313) plt.plot(sr*m2km,qavg[0]/s2y,c='r',lw=lw*1.5,label='Jan09-Sep09') plt.fill_between(sr*m2km,qp25[0]/s2y,qp75[0]/s2y,alpha=0.1,lw=0,color='b') plt.plot(sr*m2km,qavg[1]/s2y,c='b',lw=lw*1.5,label='Sep09-Jun10') plt.fill_between(sr*m2km,qp25[1]/s2y,qp75[1]/s2y,alpha=0.1,lw=0,color='r') plt.plot(sr*m2km,qavg[2]/s2y,c='g',lw=lw*1.5,label='Jan09-Jun10') plt.fill_between(sr*m2km,qp25[2]/s2y,qp75[2]/s2y,alpha=0.1,lw=0,color='g') plt.scatter([s_b_o*m2km,s_c_o*m2km],[q2bca_av/s2y,q2cca_av/s2y],c='c',marker='v',s=S,alpha=A,lw=0) plt.plot(X_t*m2km,Q_t/s2y,'grey',lw=lw*1.5) plt.plot(X_t*m2km,Q_t_DD/s2y,'k',lw=lw*1.5) plt.legend(frameon=False,fontsize=fs*2) plt.xlabel('$S_d$ (km)',fontsize=fs*2) plt.ylabel('$q_s$ (m$^2$/s)',fontsize=fs*2) plt.xticks(fontsize=fs*2) plt.yticks(fontsize=fs*2) plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10) fig.set_size_inches(12,12, forward=True) # - # ### Data-driven roughness theory # + def step(t,targetX,G,sigma,oldA): z01 = sigma/z01_sigma_sf a1_t_SR = a(G,z_m[2],z01,Z,L0) # downwind damping for the BCs e1_t_SR = np.exp(-(a1_t_SR+1j*f)*t) # decay downwind of boundary B1_t_SR = G/(1-1j*a1_t_SR/f) # downwind equilibrium for the BCs A1_t_SR = (oldA-B1_t_SR)*e1_t_SR+B1_t_SR # downwind wind vector evolution U1_t_SR = np.abs(A1_t_SR) # downwind wind speed evolution T1_t_SR = 
np.arctan2(A1_t_SR.imag,A1_t_SR.real) # upwind equilibrium wind angle S1_t_SR = t*U1_t_SR # distance travelled in lagrangian f.o.r. from boundary X1_t_SR = S1_t_SR*np.cos(T1_t_SR) # distanced travelled normal to boundary return X1_t_SR-targetX def get_t(targetX,G,sigma,oldA): return root(step,1e-3,args=(targetX,G,sigma,oldA),method='lm').x[0] # + z01_sigma_sf = 10 G_t = np.empty_like(ubinmids) for i in range(len(G_t)): G_t[i] = gfromu(ubinmids[i],z_m[2],z00) targetX = 1 X1_t_SR = np.empty((len(G_t),len(sr[sr>0]))) U1_t_SR = np.empty((len(G_t),len(sr[sr>0]))) for i in np.arange(0,len(G_t)): a0_t_SR = a(G_t[i],z_m[2],z00,Z,L0) # upwind damping for the BCs A0_t_SR = G_t[i]/(1-1j*a0_t_SR/f) # upwind equilibrium wind vector for j in np.arange(0,len(sr[sr>0])): if j==0: Aold = A0_t_SR else: Aold = A1_t_SR t_temp = get_t(targetX,G_t[i],stdr_jun10savg[sr>0][j],Aold) z01 = stdr_jun10savg[sr>0][j]/z01_sigma_sf a1_t_SR = a(G_t[i],z_m[2],z01,Z,L0) # downwind damping for the BCs e1_t_SR = np.exp(-(a1_t_SR+1j*f)*t_temp) # decay downwind of boundary B1_t_SR = G_t[i]/(1-1j*a1_t_SR/f) # downwind equilibrium for the BCs A1_t_SR = (Aold-B1_t_SR)*e1_t_SR+B1_t_SR # downwind wind vector evolution U1_t_SR[i,j] = np.abs(A1_t_SR) # downwind wind speed evolution X1_t_SR[i,j] = sr[sr>0][j] # distanced travelled normal to boundary Q1_t_SR = flux(U1_t_SR,z_m[2],z00) # sediment flux from winds # put this range of forcing conditions together such that they occur with the same # likelihood as reality tempu_SR = U1_t_SR*pu2a[:,np.newaxis] # wind speeds scaled by likelihood of being measured at met_a tempq_SR = Q1_t_SR*pu2a[:,np.newaxis] # sediment flux scaled as above # create transects X_tbins = np.linspace(0,np.ceil(np.max(sr)/100)*100,int((np.ceil(np.max(sr)/100)))+1) X_t = X_tbins[1:] - np.diff(X_tbins)[0]/2 U_t_SR = np.empty([np.shape(X_t)[0],np.shape(tempu_SR)[0]]) Q_t_SR = np.empty([np.shape(X_t)[0],np.shape(tempu_SR)[0]]) # for a given distance bin downwind, get the mean of the speeds and 
# fluxes for each forcing condition: for every downwind-distance bin, average
# the likelihood-scaled speeds/fluxes of the points that fall in that bin
for i in range(len(X_t)):
    for j in range(len(tempu_SR)):
        U_t_SR[i,j] = np.mean(tempu_SR[j][(X1_t_SR[j]>X_tbins[i])&(X1_t_SR[j]<=X_tbins[i+1])])
        Q_t_SR[i,j] = np.mean(tempq_SR[j][(X1_t_SR[j]>X_tbins[i])&(X1_t_SR[j]<=X_tbins[i+1])])

# get the net speed and flux transect
# NOTE(review): entries were pre-scaled by the measurement likelihood (pu2a),
# so summing over forcing conditions gives a probability-weighted mean
U_t_SR = np.sum(U_t_SR,axis=1)
Q_t_SR = np.sum(Q_t_SR,axis=1)

# +
# get the range in wind speeds measured at 2 m for each met tower across a range of forcing conditions
Ng = 10 # number of forcing steps
G = np.linspace(1,50,Ng) # the geostrophic flow speed forcing
X_SR = np.empty((len(G),len(sr[sr>0]))) # distance downwind of the boundary
U_SR = np.empty((len(G),len(sr[sr>0]))) # wind speed at each distance
for i in np.arange(0,len(G)):
    a0_SR = a(G[i],z_m[2],z00,Z,L0) # upwind damping for the BCs
    A0_SR = G[i]/(1-1j*a0_SR/f) # upwind equilibrium wind vector
    for j in np.arange(0,len(sr[sr>0])):
        # march downwind: the previous segment's wind vector is the new BC
        if j==0:
            Aold = A0_SR
        else:
            Aold = A_SR
        t_temp = get_t(targetX,G[i],stdr_jun10savg[sr>0][j],Aold)
        # local roughness length scaled from the elevation variability
        z01 = stdr_jun10savg[sr>0][j]/z01_sigma_sf
        a_SR = a(G[i],z_m[2],z01,Z,L0) # downwind damping for the BCs
        e_SR = np.exp(-(a_SR+1j*f)*t_temp) # decay downwind of boundary
        B_SR = G[i]/(1-1j*a_SR/f) # downwind equilibrium for the BCs
        A_SR = (Aold-B_SR)*e_SR+B_SR # downwind wind vector evolution
        U_SR[i,j] = np.abs(A_SR) # downwind wind speed evolution
        X_SR[i,j] = sr[sr>0][j] # distanced travelled normal to boundary

# 2 m wind speed for each met tower because s_X_o is the distance from the boundary
U0a_SR = U_SR[np.arange(0,Ng),np.argmin(np.abs(X_SR-s_a_o),axis=-1)]
U0b_SR = U_SR[np.arange(0,Ng),np.argmin(np.abs(X_SR-s_b_o),axis=-1)]
U0c_SR = U_SR[np.arange(0,Ng),np.argmin(np.abs(X_SR-s_c_o),axis=-1)]
# -

# quick look: modelled inter-tower speed differences, with and without the
# spatially-resolved roughness (SR) treatment
plt.plot(U0a-U0b,U0a-U0c,'grey',lw=lw*1.5)
plt.plot(U0a_SR-U0b_SR,U0a_SR-U0c_SR,'k',lw=lw*1.5)
plt.axis('square')
plt.xlim(-6,6)
plt.ylim(-6,6)

# +
# each forcing scenario in color and the net transect in black
fig = plt.gcf()
plt.subplot(211)
for i in range(len(U1_t_SR)):
    plt.plot(X1_t_SR[i]*m2km,U1_t_SR[i])
plt.plot(X_t*m2km,U_t_SR,'k')
# upper panel (continued): wind-speed transects per forcing condition
plt.xlim(0,np.max(X1_t_SR*m2km))
plt.ylim(0,np.nanmax(U1_t_SR))
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs*2)
plt.xticks([],[])
plt.yticks(fontsize=fs*2)
# lower panel: the corresponding sediment-flux transects (converted to per-year via s2y)
plt.subplot(212)
for i in range(len(U1_t_SR)):
    plt.plot(X1_t_SR[i]*m2km,Q1_t_SR[i]/s2y)
plt.plot(X_t*m2km,Q_t_SR/s2y,'k')
plt.xlim(0,np.max(X1_t_SR*m2km))
plt.ylim(0,np.nanmax(Q1_t_SR)/s2y)
plt.xlabel('distance (km)',fontsize=fs*2)
plt.ylabel('$q_{s}$ (m$^{2}$/yr)',fontsize=fs*2)
plt.xticks(fontsize=fs*2)
plt.yticks(fontsize=fs*2)
fig.set_size_inches(6, 6, forward=True)

# +
# three-panel comparison along DEM transect row i: roughness (sigma_eta),
# wind speed vs met-tower observations, and sediment flux
lw = 0.5
S = 40
A = 0.5
i = 4800 # transect row through the DEM -- presumably chosen to pass near the towers; TODO confirm
# horizontal line across the DEM at yr[i]; its intersection with the upwind
# boundary gives the column index where distance-downwind starts
templ = LineString([(np.min(xr),yr[i]),(np.max(xr),yr[i])])
tempp = templ.intersection(upwind_boundary_r)
uwi = np.argmin(np.abs(xr-tempp.coords[0][0]))
fig = plt.gcf()
# panel 1: surface-roughness proxy along the transect
ax0 = plt.subplot(311)
plt.plot(sr*m2km,stdr_jun10savg,c='y',lw=lw*1.5,label='Jun10')
plt.legend(frameon=False,fontsize=fs*2)
plt.ylabel('$\\sigma_{\\eta}$ (m)',fontsize=fs*2)
plt.xticks([],[])
plt.yticks(fontsize=fs*2)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
# panel 2: measured 2 m winds (mean markers + interquartile bars) against theory
ax1 = plt.subplot(312)
plt.scatter([s_a_o*m2km,s_b_o*m2km,s_c_o*m2km],[np.mean(u2aca),np.mean(u2bca),np.mean(u2cca)],c='c',marker='v',s=S,alpha=A,lw=0,label='Met')
plt.plot([s_a_o*m2km,s_a_o*m2km],[np.percentile(u2aca,25),np.percentile(u2aca,75)],c='c',alpha=A,lw=lw)
plt.plot([s_b_o*m2km,s_b_o*m2km],[np.percentile(u2bca,25),np.percentile(u2bca,75)],c='c',alpha=A,lw=lw)
plt.plot([s_c_o*m2km,s_c_o*m2km],[np.percentile(u2cca,25),np.percentile(u2cca,75)],c='c',alpha=A,lw=lw)
plt.plot(X_t*m2km,U_t,'grey',lw=lw*1.5,label='Theory')
plt.plot(X_t*m2km,U_t_SR,'k',lw=lw*1.5,label='Theory SR')
# extend the upwind equilibrium value back to the start of valid data
plt.plot([(xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,X_t[0]*m2km],[U_t[0],U_t[0]],'grey',lw=lw*1.5)
plt.legend(frameon=False,fontsize=fs*2)
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs*2)
plt.xticks([],[])
plt.yticks(fontsize=fs*2)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
ax1.yaxis.set_label_position("right")
ax1.yaxis.tick_right()
# panel 3: measured sediment-flux transects for the three DEM-difference
# epochs (line = mean, shaded band = interquartile range) vs theory
ax2 = plt.subplot(313)
plt.plot(sr*m2km,qavg[0]/s2y,c='r',lw=lw*1.5,label='Jan09-Sep09')
# FIX: the interquartile bands for series 0 and 1 used swapped colors ('b'
# under the red mean line and 'r' under the blue one); each band now matches
# its mean line, as series 2 already did with 'g'/'g'
plt.fill_between(sr*m2km,qp25[0]/s2y,qp75[0]/s2y,alpha=0.1,lw=0,color='r')
plt.plot(sr*m2km,qavg[1]/s2y,c='b',lw=lw*1.5,label='Sep09-Jun10')
plt.fill_between(sr*m2km,qp25[1]/s2y,qp75[1]/s2y,alpha=0.1,lw=0,color='b')
plt.plot(sr*m2km,qavg[2]/s2y,c='g',lw=lw*1.5,label='Jan09-Jun10')
plt.fill_between(sr*m2km,qp25[2]/s2y,qp75[2]/s2y,alpha=0.1,lw=0,color='g')
plt.scatter([s_b_o*m2km,s_c_o*m2km],[q2bca_av/s2y,q2cca_av/s2y],c='c',marker='v',s=S,alpha=A,lw=0)
plt.plot(X_t*m2km,Q_t/s2y,'grey',lw=lw*1.5)
plt.plot(X_t*m2km,Q_t_SR/s2y,'k',lw=lw*1.5)
plt.legend(frameon=False,fontsize=fs*2)
plt.xlabel('$S_d$ (km)',fontsize=fs*2)
plt.ylabel('$q_s$ (m$^2$/s)',fontsize=fs*2)
plt.xticks(fontsize=fs*2)
plt.yticks(fontsize=fs*2)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
fig.set_size_inches(12, 12, forward=True)
# -

# # Figures

# ## Figure 1

# load the Sep09 DEM and build its coordinate grids from the GeoTIFF transform
ds = gdal.Open(sep09, gdal.GA_ReadOnly)
rb = ds.GetRasterBand(1)
metaxy = ds.GetGeoTransform()
z_sep09 = np.flip(rb.ReadAsArray(),axis=0) # flip rows so y increases with row index
x = metaxy[0]+metaxy[1]*np.arange(0,np.shape(z_sep09)[1])
y = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(z_sep09)[0]))
X,Y = np.meshgrid(x,y)

# +
# Figure 1: site overview map (panel a), two DEM insets (b,c), and a sketch
# of the vertical ranges sampled by each instrument (d)
lw = 0.5
S = 40
A = 0.5
# inset window 1 (map coordinates) and its index bounds in the DEM
Xi1 = 3.76e+5
Xf1 = Xi1 + 5e+2
Yi1 = 3.633e+6
Yf1 = Yi1 + 5e+2
xi1 = np.argmin(np.abs(x-Xi1))
xf1 = np.argmin(np.abs(x-Xf1))
yi1 = np.argmin(np.abs(y-Yi1))
yf1 = np.argmin(np.abs(y-Yf1))
# inset window 2
Xi2 = 3.79e+5
Xf2 = Xi2 + 5e+2
Yi2 = 3.633e+6
Yf2 = Yi2 + 5e+2
xi2 = np.argmin(np.abs(x-Xi2))
xf2 = np.argmin(np.abs(x-Xf2))
yi2 = np.argmin(np.abs(y-Yi2))
yf2 = np.argmin(np.abs(y-Yf2))
# map annotations: upwind boundary, survey domains, and the two inset boxes
uw_boundary = LineCollection([upwind_boundary_o], alpha=1,color='b', lw=lw*1.5)
jan09_domain = PolygonPatch(jan09_domain_o, fc='none', ec='g', lw=lw*1.5)
sep09_domain = PolygonPatch(sep09_domain_o, fc='none', ec='g', lw=lw*1.5)
jun10_domain = PolygonPatch(jun10_domain_o, fc='none', ec='g', lw=lw*1.5)
box1 = PolygonPatch(Polygon([(Xi1,Yi1),(Xi1,Yf1),(Xf1,Yf1),(Xf1,Yi1)]), fc='none', ec='k', lw=lw*1.5)
box2 = PolygonPatch(Polygon([(Xi2,Yi2),(Xi2,Yf2),(Xf2,Yf2),(Xf2,Yi2)]), fc='none', ec='k', lw=lw*1.5)
# NOTE(review): `ds` is reused here as a scalar arrow length, clobbering the
# GDAL dataset handle opened above (the raster was already read, so harmless)
ds = 5e+3
x00 = 3.72e+5
y00 = 3.636e+6
cu = 'c'
ca = 'r'
cs = 'y'
cr = 'm'
# rectangles sketching the height range covered by each observation type
dune = Rectangle([0.003,0.001],0.0015,5-0.001,linewidth=0,edgecolor='none',facecolor=cs,alpha=A,zorder=-1)
met = Rectangle([0.006,2],0.0015,10-2,linewidth=0,edgecolor='none',facecolor=cu,alpha=A,zorder=-1)
lidar = Rectangle([0.009,10],0.0015,300-10,linewidth=0,edgecolor='none',facecolor=cr,alpha=A,zorder=-1)
abl = Rectangle([0.012,100],0.0015,2000-100,linewidth=0,edgecolor='none',facecolor=ca,alpha=A,zorder=-1)
fig = plt.gcf()
ax0 = plt.subplot2grid((2, 7), (0, 0), colspan=4, rowspan=2)
plt.text(x00,y00-2*ds,s='$\\theta_d$ $x$ $y$ $x_d$ $y_d$ $U^a$ $U^b$ $U^c$ \n roughness transition (N) \n $L^a$ $L^b$ $L^c$ b c',fontsize=fs)
ax0.add_collection(uw_boundary)
ax0.add_patch(jan09_domain)
ax0.add_patch(sep09_domain)
ax0.add_patch(jun10_domain)
ax0.add_patch(box1)
ax0.add_patch(box2)
ax0.scatter(x00,y00+ds,c='k',marker='^',s=20,lw=0) # north arrow head
# instrument sites: 'm' triangles presumably lidars (matches cr), 'c' the met towers
ax0.scatter([x_a_o,x_b_o],[y_a_o,y_b_o],c='m',marker='^',s=S,alpha=A,lw=0)
ax0.scatter([x_a_o,x_b_o,x_c_o],[y_a_o,y_b_o,y_c_o],c='c',marker='v',s=S,alpha=A,lw=0)
# axes arrows: map (x,y) in black, downwind-rotated (x_d,y_d) in grey (angle af0)
ax0.plot([x00,x00+ds],[y00,y00],c='k',lw=lw*1.5)
ax0.plot([x00,x00],[y00,y00+ds],c='k',lw=lw*1.5)
ax0.plot([x00,x00+ds*np.cos(af0*np.pi/180)],[y00,y00+ds*np.sin(af0*np.pi/180)],c='grey',lw=lw*1.5)
ax0.plot([x00,x00-ds*np.sin(af0*np.pi/180)],[y00,y00+ds*np.cos(af0*np.pi/180)],c='grey',lw=lw*1.5)
ax0.axis('equal')
ax0.set_ylim(3.6275e+6,3.6400e+6)
ax0.set_xticks([],[])
ax0.set_yticks([],[])
plt.text(x00,y00-2*ds,'a b c d',fontsize=12) # panel labels
# inset b: small-scale relief in window 1
ax1 = plt.subplot2grid((2, 7), (0, 4), colspan=2, rowspan=1)
z0 = 1194
dz = 2
s = ax1.pcolormesh(X[yi1:yf1,xi1:xf1],Y[yi1:yf1,xi1:xf1],z_sep09[yi1:yf1,xi1:xf1]-z0,vmin=0,vmax=dz,rasterized=True)
c1 = plt.colorbar(s,ax=ax1, ticks=[0, 1, 2])
c1.set_label('$\\delta\\eta$ (m)',fontsize=fs)
c1.ax.set_yticklabels(['0','1','2'],fontsize=fs)
ax1.axis('equal')
ax1.set_xticks([],[])
ax1.set_yticks([],[])
# inset c: large dune relief in window 2
ax2 = plt.subplot2grid((2, 7), (1, 4), colspan=2, rowspan=1)
z0 = 1206
dz = 12
s2 = ax2.pcolormesh(X[yi2:yf2,xi2:xf2],Y[yi2:yf2,xi2:xf2],z_sep09[yi2:yf2,xi2:xf2]-z0,vmin=0,vmax=dz,rasterized=True)
c2 = plt.colorbar(s2,ax=ax2, ticks=[0, 6, 12])
c2.set_label('$\\delta\\eta$ (m)',fontsize=fs)
c2.ax.set_yticklabels(['0','6','12'],fontsize=fs)
ax2.axis('equal')
ax2.set_xticks([],[])
ax2.set_yticks([],[])
# panel d: height ranges sampled by each instrument, log z axis
ax3 = plt.subplot2grid((2, 7), (0, 6), colspan=1, rowspan=2)
s = ax3.add_patch(met)
s = ax3.add_patch(lidar)
s = ax3.add_patch(dune)
s = ax3.add_patch(abl)
plt.ylabel('$z$ (m)',fontsize=fs)
plt.xlim([0,0.015])
plt.ylim([0.01,2000])
ax3.set_yscale('log')
ax3.spines['left'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax3.yaxis.tick_right()
ax3.yaxis.set_label_position("right")
plt.xticks([],[])
plt.yticks(fontsize=fs)
# uniform thin axis/spine styling for every panel and colorbar
ax0.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
ax1.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(lw)
ax2.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(lw)
ax3.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax3.spines[axis].set_linewidth(lw)
c1.ax.tick_params(width=lw,which='both',direction='in')
c1.outline.set_linewidth(lw)
c2.ax.tick_params(width=lw,which='both',direction='in')
c2.outline.set_linewidth(lw)
fig.subplots_adjust(wspace=0.5)
fig.subplots_adjust(hspace=0.15)
fig.subplots_adjust(bottom=0.05)
fig.subplots_adjust(top=0.96)
fig.subplots_adjust(right=0.9)
fig.subplots_adjust(left=0.01)
fig.set_size_inches(142.5/25.4, 142.5/25.4/2.4, forward=True)
plt.savefig('figures/fig1_0.pdf',dpi=300)

# +
# Figure 1 overlay: polar rose of the sediment-flux direction distribution
# measured at met tower a, binned every `da` degrees
da = 15
mAf = np.arange(-180,180+da,da)
mAmidf = mAf[1:]-da/2
pq2aa = np.empty(len(mAmidf))
for i in range(len(mAmidf)):
    pq2aa[i] = np.sum(q2a[(aa>=mAf[i])&(aa<mAf[i+1])])
pq2aa = pq2aa/np.sum(q2a) # normalise to a probability per bin

lw = 0.5
fig = plt.gcf()
ax0 = plt.subplot2grid((2, 7), (0, 0), colspan=3, rowspan=3,projection='polar')
# draw each wedge outline (two radial edges plus the arc), rotated by af0
for i in range(len(mAmidf)):
    sc = ax0.plot([(mAf[i]+af0)/180*np.pi,(mAf[i]+af0)/180*np.pi],[0,pq2aa[i]],color='r',lw=lw)
    sc = ax0.plot([(mAf[i+1]+af0)/180*np.pi,(mAf[i+1]+af0)/180*np.pi],[0,pq2aa[i]],color='r',lw=lw)
    tempt = (np.linspace(mAf[i],mAf[i+1],10)+af0)/180*np.pi
    tempr = np.ones_like(tempt)*pq2aa[i]
    sc = ax0.plot(tempt,tempr,color='r',lw=lw)
ax0.plot([0,0],[0,0.25],color='k',label='$P(q_s) = 0.25$',lw=lw) # radial scale bar
plt.legend(frameon=False,fontsize=fs)
plt.axis('off')
# same layout as fig1_0 so the rose registers onto the saved map
fig.subplots_adjust(wspace=0.5)
fig.subplots_adjust(hspace=0.15)
fig.subplots_adjust(bottom=0.05)
fig.subplots_adjust(top=0.96)
fig.subplots_adjust(right=0.9)
fig.subplots_adjust(left=0.01)
fig.set_size_inches(142.5/25.4, 142.5/25.4/2.4, forward=True)
plt.savefig('figures/fig1_1.pdf',dpi=300)
# -

# ## Figure 2

# +
# Figure 2 (top): elevation profile along DEM transect row i
lw = 0.5
i = 4800
templ = LineString([(np.min(xr),yr[i]),(np.max(xr),yr[i])])
tempp = templ.intersection(upwind_boundary_r)
uwi = np.argmin(np.abs(xr-tempp.coords[0][0])) # column index of the upwind boundary
zmin = 1190
zmax = 1230
fig = plt.gcf()
ax0 = plt.subplot(111)
plt.fill_between((xr-xr[uwi])*m2km,zr_jun10[i,:],np.ones(np.shape(xr)[0])*zmin,color='k',lw=0,alpha=0.15)
plt.xlabel('$S_d$ (km)',fontsize=fs)
plt.ylabel('$\\eta$ (m)',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks([1200,1210],fontsize=fs)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
plt.ylim(zmin,zmax)
ax0.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(bottom=0.12)
fig.subplots_adjust(top=0.99)
fig.subplots_adjust(right=0.92)
fig.subplots_adjust(left=0.1)
fig.set_size_inches(142.5/25.4, 142.5/25.4/1.5, forward=True)
plt.savefig('figures/fig2_0.pdf', dpi=300)

# +
# Figure 2 (panels a-c): roughness, wind speed and sediment flux transects
# compared against the met-tower and DEM-difference observations
lw = 0.5
S = 40
A = 0.5
i = 4800
templ = LineString([(np.min(xr),yr[i]),(np.max(xr),yr[i])])
tempp = templ.intersection(upwind_boundary_r)
uwi = np.argmin(np.abs(xr-tempp.coords[0][0]))
fig = plt.gcf()
ax0 = plt.subplot(311)
plt.plot(sr*m2km,stdr_jun10savg,c='y',lw=lw*1.5,label='Jun10')
plt.legend(frameon=False,fontsize=fs)
plt.ylabel('$\\sigma_{\\eta}$ (m)',fontsize=fs)
plt.xticks([],[])
plt.yticks(fontsize=fs)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
# panel b: measured 2 m winds (means + interquartile bars) vs theory
ax1 = plt.subplot(312)
plt.scatter([s_a_o*m2km,s_b_o*m2km,s_c_o*m2km],[np.mean(u2aca),np.mean(u2bca),np.mean(u2cca)],c='c',marker='v',s=S,alpha=A,lw=0,label='Met')
plt.plot([s_a_o*m2km,s_a_o*m2km],[np.percentile(u2aca,25),np.percentile(u2aca,75)],c='c',alpha=A,lw=lw)
plt.plot([s_b_o*m2km,s_b_o*m2km],[np.percentile(u2bca,25),np.percentile(u2bca,75)],c='c',alpha=A,lw=lw)
plt.plot([s_c_o*m2km,s_c_o*m2km],[np.percentile(u2cca,25),np.percentile(u2cca,75)],c='c',alpha=A,lw=lw)
plt.plot(X_t*m2km,U_t,'grey',lw=lw*1.5,label='Theory')
plt.plot([(xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,X_t[0]*m2km],[U_t[0],U_t[0]],'grey',lw=lw*1.5)
plt.legend(frameon=False,fontsize=fs)
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs)
ax1.yaxis.set_label_position("right")
ax1.yaxis.tick_right()
plt.xticks([],[],fontsize=fs)
plt.yticks(fontsize=fs)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
plt.text(0,4,'a b c',fontsize=12)
# panel c: measured flux transects (mean + interquartile band) vs theory
ax2 = plt.subplot(313)
plt.plot(sr*m2km,qavg[0]/s2y,c='r',lw=lw*1.5,label='Jan09-Sep09')
# FIX: the interquartile bands for series 0 and 1 used swapped colors ('b'
# under the red mean line and 'r' under the blue one); each band now matches
# its mean line, as series 2 already did with 'g'/'g'
plt.fill_between(sr*m2km,qp25[0]/s2y,qp75[0]/s2y,alpha=0.1,lw=0,color='r')
plt.plot(sr*m2km,qavg[1]/s2y,c='b',lw=lw*1.5,label='Sep09-Jun10')
plt.fill_between(sr*m2km,qp25[1]/s2y,qp75[1]/s2y,alpha=0.1,lw=0,color='b')
plt.plot(sr*m2km,qavg[2]/s2y,c='g',lw=lw*1.5,label='Jan09-Jun10')
plt.fill_between(sr*m2km,qp25[2]/s2y,qp75[2]/s2y,alpha=0.1,lw=0,color='g')
plt.scatter([s_b_o*m2km,s_c_o*m2km],[q2bca_av/s2y,q2cca_av/s2y],c='c',marker='v',s=S,alpha=A,lw=0)
plt.plot(X_t*m2km,Q_t/s2y,'grey',lw=lw*1.5) # theory flux transect
plt.legend(frameon=False,fontsize=fs)
plt.xlabel('$S_d$ (km)',fontsize=fs)
plt.ylabel('$q_s$ (m$^2$/s)',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks([0,10,20],fontsize=fs)
plt.xlim((xr[np.min(np.argwhere(~np.isnan(zr_jun10[i,:])))]-xr[uwi])*m2km,10)
# thin in-pointing ticks and spines for all three panels
ax0.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
ax1.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(lw)
ax2.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(lw)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(bottom=0.12)
fig.subplots_adjust(top=0.99)
fig.subplots_adjust(right=0.92)
fig.subplots_adjust(left=0.1)
fig.set_size_inches(142.5/25.4, 142.5/25.4/1.5, forward=True)
plt.savefig('figures/fig2_1.pdf', dpi=300)
# -

# ## Figure 3

# + [markdown] heading_collapsed=true
# ### old

# + hidden=true
# Figure 3 (old layout): four panels -- (a) 2d histogram of inter-tower speed
# differences with the theory curve, (b) normalised lidar wind profiles,
# (c) 10 m vs 300 m speeds at both lidars, (d) damping-rate profiles
fig = plt.gcf()
ax0 = plt.subplot(141)
plt.plot(U0a-U0b,U0a-U0c,'grey',lw=lw*1.5) # theory locus
hist = np.histogram2d(u0aca-u0bca,u0aca-u0cca,bins=[np.linspace(-6,6,40),np.linspace(-6,6,40)])
hist[0][hist[0]==0] = np.nan # mask empty bins so they render blank
s0 = plt.pcolormesh(hist[1],hist[2],hist[0].T,rasterized=True,vmin=0,vmax=20,cmap='plasma')
plt.xlabel('$U_{2}^a-U_{2}^b$ (m/s)',fontsize=fs)
plt.ylabel('$U_{2}^a-U_{2}^c$ (m/s)',fontsize=fs)
plt.xlim(-6,6)
plt.ylim(-6,6)
plt.gca().set_aspect('equal')
plt.xticks([-6,0,6],fontsize=fs)
plt.yticks([-6,0,6],fontsize=fs)
plt.text(-6,-6,'a b c d',fontsize=12) # panel labels
# panel b: mean profiles with interquartile bands for the two lidars
ax1 = plt.subplot(142)
plt.plot(uavl1,z_l1[:-1],'-o',markersize=lw*1.5,lw=lw*1.5,c='r')
plt.fill_betweenx(z_l1[:-1],u25l1,u75l1,color='r',alpha=0.25,lw=0)
tempz = z_l2[:-1]
tempz[-1] = 77 # pin the top gate to 77 m; NOTE(review): mutates z_l2 in place
plt.plot(uavl2,tempz,'-o',markersize=lw*1.5,lw=lw*1.5,c='b')
plt.fill_betweenx(tempz,u25l2,u75l2,color='b',alpha=0.25,lw=0)
plt.yscale('log')
plt.xlabel('$U/U_{77}$ (m/s)',fontsize=fs)
plt.ylabel('$z$ (m)',fontsize=fs)
plt.xlim(0.3,1.4)
plt.ylim(10**1,10**2)
ax1.yaxis.set_minor_formatter(NullFormatter())
plt.xticks(fontsize=fs)
plt.yticks([1e+1,1e+2],fontsize=fs)
# panel c: binned 300 m speed as a function of 10 m speed at each lidar
ax2 = plt.subplot(143)
plt.plot(u1_la_binned,u11_l1a_binnedav,'-o',markersize=lw,lw=lw,c='r',label='$U^a$')
plt.plot(u1_la_binned,u11_l2a_binnedav,'-o',markersize=lw,lw=lw,c='b',label='$U^b$')
plt.fill_between(u1_la_binned,u11_l1a_binned25,u11_l1a_binned75,color='r',alpha=0.25,lw=0)
plt.fill_between(u1_la_binned,u11_l2a_binned25,u11_l2a_binned75,color='b',alpha=0.25,lw=0)
plt.xlabel('$U_{10}$ (m/s)',fontsize=fs)
plt.ylabel('$U_{300}$ (m/s)',fontsize=fs)
plt.legend(frameon=False,loc=0,handlelength=lw*2,fontsize=fs)
plt.xlim(0,15)
plt.ylim(0,20)
plt.xticks([0,5,10,15],fontsize=fs)
plt.yticks([0,10,20],fontsize=fs)
# panel d: damping rate |alpha| vs height, model vs lidar vs W&A reference
ax3 = plt.subplot(144)
plt.plot(al0_mbz,z_t,'r',markersize=lw,lw=lw)
plt.plot(al1_mbz,z_t,'b',markersize=lw,lw=lw)
plt.plot(a_l,z_l1[:-1],'-o',color='r',markersize=lw,lw=lw)
plt.plot(a_les,z_les,color='b',linestyle='--',markersize=lw,lw=lw,label='W\&A')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$|\\alpha|$ (1/s)',fontsize=fs)
plt.ylabel('$z$ (m)',fontsize=fs)
plt.legend(frameon=False,loc=1,handlelength=lw*2,fontsize=fs)
plt.xlim(1e-5,2e-3)
plt.ylim(1e+0,1e+2)
plt.xticks([1e-5,1e-3],fontsize=fs)
plt.yticks([1e+0,1e+1,1e+2],fontsize=fs)
fig.subplots_adjust(wspace=0.6)
fig.subplots_adjust(bottom=0.33)
fig.subplots_adjust(top=0.9)
fig.subplots_adjust(right=0.99)
fig.subplots_adjust(left=0.19)
# histogram colorbar on the left of panel a
cbar_ax = fig.add_axes([0.08, 0.33, 0.01, 0.57])
c0 = fig.colorbar(s0, cax=cbar_ax, ticks=[0,10,20])
cbar_ax.yaxis.set_label_position('left')
cbar_ax.yaxis.set_ticks_position('left')
c0.set_label('$n$',fontsize=fs)
c0.ax.set_yticklabels(['$0$','$10$','$20$'],fontsize=fs)
ax0.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
ax1.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(lw)
ax2.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(lw)
ax3.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax3.spines[axis].set_linewidth(lw)
c0.ax.tick_params(width=lw)
c0.outline.set_linewidth(lw)
fig.set_size_inches(142.5/25.4, 142.5/25.4/4.15, forward=True)
plt.savefig('figures/fig3.pdf', dpi=300)
# -

# ### new

# +
# Figure 3 (new layout): same four panels as the "old" cell above, with
# revised spacing, extra x-ticks in panel d, and in-pointing ticks;
# overwrites the same output file figures/fig3.pdf
fig = plt.gcf()
ax0 = plt.subplot(141)
plt.plot(U0a-U0b,U0a-U0c,'grey',lw=lw*1.5) # theory locus
hist = np.histogram2d(u0aca-u0bca,u0aca-u0cca,bins=[np.linspace(-6,6,40),np.linspace(-6,6,40)])
hist[0][hist[0]==0] = np.nan # mask empty bins
s0 = plt.pcolormesh(hist[1],hist[2],hist[0].T,rasterized=True,vmin=0,vmax=20,cmap='plasma')
plt.xlabel('$U_{2}^a-U_{2}^b$ (m/s)',fontsize=fs)
plt.ylabel('$U_{2}^a-U_{2}^c$ (m/s)',fontsize=fs)
plt.xlim(-6,6)
plt.ylim(-6,6)
plt.gca().set_aspect('equal')
plt.xticks([-6,0,6],fontsize=fs)
plt.yticks([-6,0,6],fontsize=fs)
plt.text(-6,-6,'a b c d',fontsize=12) # panel labels
# panel b: normalised lidar wind profiles with interquartile bands
ax1 = plt.subplot(142)
plt.plot(uavl1,z_l1[:-1],'-o',markersize=lw*1.5,lw=lw*1.5,c='r')
plt.fill_betweenx(z_l1[:-1],u25l1,u75l1,color='r',alpha=0.25,lw=0)
tempz = z_l2[:-1]
tempz[-1] = 77 # pin top gate to 77 m; NOTE(review): mutates z_l2 in place
plt.plot(uavl2,tempz,'-o',markersize=lw*1.5,lw=lw*1.5,c='b')
plt.fill_betweenx(tempz,u25l2,u75l2,color='b',alpha=0.25,lw=0)
plt.yscale('log')
plt.xlabel('$U/U_{77}$ (m/s)',fontsize=fs)
plt.ylabel('$z$ (m)',fontsize=fs)
plt.xlim(0.3,1.4)
plt.ylim(10**1,10**2)
ax1.yaxis.set_minor_formatter(NullFormatter())
plt.xticks(fontsize=fs)
plt.yticks([1e+1,1e+2],fontsize=fs)
# panel c: binned 300 m speed vs 10 m speed at each lidar
ax2 = plt.subplot(143)
plt.plot(u1_la_binned,u11_l1a_binnedav,'-o',markersize=lw,lw=lw,c='r',label='$U^a$')
plt.plot(u1_la_binned,u11_l2a_binnedav,'-o',markersize=lw,lw=lw,c='b',label='$U^b$')
plt.fill_between(u1_la_binned,u11_l1a_binned25,u11_l1a_binned75,color='r',alpha=0.25,lw=0)
plt.fill_between(u1_la_binned,u11_l2a_binned25,u11_l2a_binned75,color='b',alpha=0.25,lw=0)
plt.xlabel('$U_{10}$ (m/s)',fontsize=fs)
plt.ylabel('$U_{300}$ (m/s)',fontsize=fs)
plt.legend(frameon=False,loc=0,handlelength=lw*2,fontsize=fs)
plt.xlim(0,15)
plt.ylim(0,20)
plt.xticks([0,5,10,15],fontsize=fs)
plt.yticks([0,10,20],fontsize=fs)
# panel d: damping rate |alpha| vs height
ax3 = plt.subplot(144)
plt.plot(al0_mbz,z_t,'r',markersize=lw,lw=lw)
plt.plot(al1_mbz,z_t,'b',markersize=lw,lw=lw)
plt.plot(a_l,z_l1[:-1],'-o',color='r',markersize=lw,lw=lw)
plt.plot(a_les,z_les,color='b',linestyle='--',markersize=lw,lw=lw,label='W\&A')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$|\\alpha|$ (1/s)',fontsize=fs)
plt.ylabel('$z$ (m)',fontsize=fs)
plt.legend(frameon=False,loc=1,handlelength=lw*2,fontsize=fs)
plt.xlim(1e-5,2e-3)
plt.ylim(1e+0,1e+2)
plt.xticks([1e-5,1e-4,1e-3],fontsize=fs)
plt.yticks([1e+0,1e+1,1e+2],fontsize=fs)
fig.subplots_adjust(wspace=0.45)
fig.subplots_adjust(bottom=0.22)
fig.subplots_adjust(top=0.93)
fig.subplots_adjust(right=0.99)
fig.subplots_adjust(left=0.14)
# histogram colorbar on the left of panel a
cbar_ax = fig.add_axes([0.05, 0.22, 0.01, 0.93-0.22])
c0 = fig.colorbar(s0, cax=cbar_ax, ticks=[0,10,20])
cbar_ax.yaxis.set_label_position('left')
cbar_ax.yaxis.set_ticks_position('left')
c0.set_label('$n$',fontsize=fs)
c0.ax.set_yticklabels(['$0$','$10$','$20$'],fontsize=fs)
ax0.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
ax1.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(lw)
ax2.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(lw)
ax3.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax3.spines[axis].set_linewidth(lw)
c0.ax.tick_params(width=lw,which='both',direction='in')
c0.outline.set_linewidth(lw)
fig.set_size_inches(190/25.4, 142.5/25.4/3.5, forward=True)
plt.savefig('figures/fig3.pdf', dpi=300)
# -

# ## Figure 4

# + [markdown] heading_collapsed=true
# ### old

# + hidden=true
# one color per upwind roughness value, and thin default linewidth
c=cm.viridis_r(np.linspace(0,1,Nz0))
lw=0.5
Np = 3 # number of roughness values to sample per axis
z0_i = np.linspace(0.2,0.8,Np)*Nz0 # indices into the roughness grid
S = 10
A = 0.5
fig = plt.gcf()
# panel a: wind-speed evolution downwind for sampled roughness pairs;
# equal pairs (i==j) plot as the saturated speed with its +/- eps band
ax0 = plt.subplot(131)
for i in z0_i.astype(int):
    for j in z0_i.astype(int):
        if i==j:
            ax0.plot([1e-1,1e+3],[U1[i,j,0],U1[i,j,0]],'k',lw=lw)
            ax0.fill_between([1e-1,1e+3],[(1-eps)*U1[i,j,0],(1-eps)*U1[i,j,0]],[(1+eps)*U1[i,j,0],(1+eps)*U1[i,j,0]],color='k',alpha=A,lw=0)
        else:
            ax0.plot(X1[i,j,:]*m2km,U1[i,j,:],c=c[i],lw=lw*1.5)
ax0.plot([0,0],[0,0],'k',lw=lw,label='$(1\\pm\\varepsilon)U_{sat}$') # legend proxy only
plt.xscale('log')
plt.xlim(1e-1,1e+3)
plt.ylim(2,18)
plt.xticks(fontsize=fs)
plt.yticks([2,10,18],fontsize=fs)
plt.xlabel('$S_{d}$ (km)',fontsize=fs)
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs)
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs)
plt.text(1e-1,3,'a b c',fontsize=12) # panel labels
# panel b: predicted deposition rate vs roughness ratio, compared with the
# K&al band and the prediction at the observed roughness pair
ax1 = plt.subplot(132)
for i in np.arange(0,len(Z0r),4):
    plt.plot(Z0r[i,:],DZDTr[i,:]/s2y*m2mm,c=c[i],lw=lw)
# plt.fill_between([np.min(Z0r),np.max(Z0r)],[dzdtr_k0725,dzdtr_k0725],[dzdtr_k0775,dzdtr_k0775],color='y',alpha=A,lw=0)
plt.fill_between([np.min(Z0r),np.max(Z0r)],[dzdtr_k070,dzdtr_k070],[dzdtr_k07100,dzdtr_k07100],color='y',alpha=A,lw=0)
plt.plot([np.min(Z0r),np.max(Z0r)],[dzdtr_k07av,dzdtr_k07av],c='y',lw=lw,label='K\&al')
plt.scatter(Z0r[z0_1wsi,z0_0wsi],DZDTr_k07[z0_1wsi,z0_0wsi]/s2y*m2mm,c='r',s=S,lw=0,label='Pred')
plt.plot([np.min(Z0r),np.max(Z0r)],[0,0],'--k',lw=lw) # zero-deposition reference
plt.xscale('log')
plt.xlim(np.min(Z0r),np.max(Z0r))
plt.ylim(-2,4)
plt.xticks([1e-5,1e+0,1e+5],fontsize=fs)
plt.yticks([-2,0,4],fontsize=fs)
plt.xlabel('$z_{0,out}/z_{0,in}$',fontsize=fs)
plt.ylabel('$d\\eta/dt|_{S_{ws}}$ (mm/yr)',fontsize=fs)
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs)
ax1.yaxis.set_minor_formatter(NullFormatter())
# panel c: saturation length over the roughness-pair plane, with the
# contour where it equals the measured distance s_r_m
ax2 = plt.subplot(133)
s1 = ax2.pcolormesh(Z0_0,Z0_1,np.log10(LSb*m2km),cmap='magma',vmin=1,vmax=2,rasterized=True)
s2 = ax2.contour(Z0_0,Z0_1,np.log10(LSb*m2km),[np.log10(s_r_m*m2km)],colors='y',linewidths=lw)
ax2.scatter(z00,z01,c='r',s=S,lw=0) # observed roughness pair
plt.xscale('log')
plt.yscale('log')
plt.xlim(np.min(Z0_0),np.max(Z0_0))
plt.ylim(np.min(Z0_1),np.max(Z0_1))
plt.xticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs)
plt.yticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs)
plt.xlabel('$z_{0,out}$ (m)',fontsize=fs)
plt.ylabel('$z_{0,in}$ (m)',fontsize=fs)
s2.collections[0].set_label('$S_{ws}$') # label the contour for the legend
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs)
ax2.yaxis.set_minor_formatter(NullFormatter())
fig.subplots_adjust(wspace=0.75)
fig.subplots_adjust(bottom=0.3)
fig.subplots_adjust(top=0.93)
# invisible scatter used only as a mappable for the roughness colorbar
s0 = plt.scatter([0],[0],c=[0],cmap='viridis_r',vmin=np.log10(np.min(z0_0)),vmax=np.log10(np.max(z0_0)))
fig.subplots_adjust(left=0.22)
cbar_ax = fig.add_axes([0.12, 0.3, 0.01, 0.63])
c0 = fig.colorbar(s0, cax=cbar_ax, ticks=[-5,-2.5,0])
cbar_ax.yaxis.set_label_position('left')
cbar_ax.yaxis.set_ticks_position('left')
c0.set_label('$z_{0,in}$ (m)',fontsize=fs)
c0.ax.set_yticklabels(['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs)
fig.subplots_adjust(right=0.88)
cbar_ax = fig.add_axes([0.91, 0.3, 0.01, 0.63])
c1 = fig.colorbar(s1, cax=cbar_ax, ticks=[1,1.5,2])
c1.set_label('$S_{sat}$ (km)',fontsize=fs)
c1.ax.set_yticklabels(['$10^{1}$','$10^{1.5}$','$10^{2}$'],fontsize=fs)
# thin ticks/spines everywhere
ax0.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
ax1.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(lw)
ax2.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(lw)
c0.ax.tick_params(width=lw)
c0.outline.set_linewidth(lw)
c1.ax.tick_params(width=lw)
c1.outline.set_linewidth(lw)
fig.set_size_inches(142.5/25.4, 142.5/25.4/4.4, forward=True)
plt.savefig('figures/fig4.pdf', dpi=300)
# -

# ### new

# +
# Figure 4 (new layout): same three panels as the "old" cell, with revised
# ticks/spacing; overwrites the same output file figures/fig4.pdf
c=cm.viridis_r(np.linspace(0,1,Nz0))
lw=0.5
Np = 3
z0_i = np.linspace(0.2,0.8,Np)*Nz0
S = 10
A = 0.5
fig = plt.gcf()
# panel a: wind-speed evolution downwind for sampled roughness pairs
ax0 = plt.subplot(131)
for i in z0_i.astype(int):
    for j in z0_i.astype(int):
        if i==j:
            # equal roughness: saturated speed plus its +/- eps tolerance band
            ax0.plot([1e-1,1e+3],[U1[i,j,0],U1[i,j,0]],'k',lw=lw)
            ax0.fill_between([1e-1,1e+3],[(1-eps)*U1[i,j,0],(1-eps)*U1[i,j,0]],[(1+eps)*U1[i,j,0],(1+eps)*U1[i,j,0]],color='k',alpha=A,lw=0)
        else:
            ax0.plot(X1[i,j,:]*m2km,U1[i,j,:],c=c[i],lw=lw*1.5)
ax0.plot([0,0],[0,0],'k',lw=lw,label='$(1\\pm\\varepsilon)U_{sat}$') # legend proxy only
plt.xscale('log')
plt.xlim(1e-1,1e+3)
plt.ylim(2,18)
plt.xticks(fontsize=fs)
plt.yticks([2,10,18],fontsize=fs)
plt.xlabel('$S_{d}$ (km)',fontsize=fs)
plt.ylabel('$U_{10}$ (m/s)',fontsize=fs)
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs)
plt.text(1e-1,3,'a b c',fontsize=12)
# panel b: deposition rate vs roughness ratio
# NOTE(review): here the K&al reference line uses dzdtr_k07[0] where the
# "old" cell used dzdtr_k07av -- confirm which is intended
ax1 = plt.subplot(132)
for i in np.arange(0,len(Z0r),4):
    plt.plot(Z0r[i,:],DZDTr[i,:]/s2y*m2mm,c=c[i],lw=lw)
# plt.fill_between([np.min(Z0r),np.max(Z0r)],[dzdtr_k0725,dzdtr_k0725],[dzdtr_k0775,dzdtr_k0775],color='y',alpha=A,lw=0)
plt.fill_between([np.min(Z0r),np.max(Z0r)],[dzdtr_k070,dzdtr_k070],[dzdtr_k07100,dzdtr_k07100],color='y',alpha=A,lw=0)
plt.plot([np.min(Z0r),np.max(Z0r)],[dzdtr_k07[0],dzdtr_k07[0]],c='y',lw=lw,label='K\&al')
plt.scatter(Z0r[z0_1wsi,z0_0wsi],DZDTr_k07[z0_1wsi,z0_0wsi]/s2y*m2mm,c='r',s=S,lw=0,label='Pred')
plt.plot([np.min(Z0r),np.max(Z0r)],[0,0],'--k',lw=lw)
plt.xscale('log')
plt.xlim(np.min(Z0r),np.max(Z0r))
plt.ylim(-2,4)
plt.xticks([1e-5,1e+0,1e+5],fontsize=fs)
plt.yticks([-2,0,2,4],fontsize=fs)
plt.xlabel('$z_{0,out}/z_{0,in}$',fontsize=fs)
plt.ylabel('$d\\eta/dt|_{S_{ws}}$ (mm/yr)',fontsize=fs)
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs)
ax1.yaxis.set_minor_formatter(NullFormatter())
# NOTE(review): these reassign the module-level z00/z01 that the wind-model
# cells earlier in the file used as boundary-condition roughness -- re-running
# those cells after this one will use these plotting values instead
z00 = 1e-4 # roughness length of upwind
z01 = 1e-1 # roughness length of downwind
# panel c: saturation length over the roughness-pair plane
ax2 = plt.subplot(133)
s1 = ax2.pcolormesh(Z0_0,Z0_1,np.log10(LSb*m2km),cmap='magma',vmin=1,vmax=2,rasterized=True)
s2 = ax2.contour(Z0_0,Z0_1,np.log10(LSb*m2km),[np.log10(s_r_m*m2km)],colors='y',linewidths=lw)
ax2.scatter(z00,z01,c='r',s=S,lw=0)
plt.xscale('log')
plt.yscale('log')
plt.axis('square')
plt.xlim(np.min(Z0_0),np.max(Z0_0))
plt.ylim(np.min(Z0_1),np.max(Z0_1))
plt.xticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs)
plt.yticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs)
plt.xlabel('$z_{0,out}$ (m)',fontsize=fs)
plt.ylabel('$z_{0,in}$ (m)',fontsize=fs)
s2.collections[0].set_label('$S_{ws}$') # label the contour for the legend
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs,loc=4)
ax2.yaxis.set_minor_formatter(NullFormatter())
fig.subplots_adjust(wspace=0.5)
fig.subplots_adjust(bottom=0.2)
fig.subplots_adjust(top=0.93)
# invisible scatter used only as a mappable for the roughness colorbar
s0 = plt.scatter([0],[0],c=[0],cmap='viridis_r',vmin=np.log10(np.min(z0_0)),vmax=np.log10(np.max(z0_0)))
fig.subplots_adjust(left=0.16)
cbar_ax = fig.add_axes([0.08, 0.2, 0.01, 0.93-0.2])
c0 = fig.colorbar(s0, cax=cbar_ax, ticks=[-5,-2.5,0])
cbar_ax.yaxis.set_label_position('left')
cbar_ax.yaxis.set_ticks_position('left')
c0.set_label('$z_{0,in}$ (m)',fontsize=fs)
c0.ax.set_yticklabels(['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs)
fig.subplots_adjust(right=0.89)
cbar_ax = fig.add_axes([0.92, 0.2, 0.01, 0.93-0.2])
c1 = fig.colorbar(s1, cax=cbar_ax, ticks=[1,1.5,2])
c1.set_label('$S_{sat}$ (km)',fontsize=fs)
c1.ax.set_yticklabels(['$10^{1}$','$10^{1.5}$','$10^{2}$'],fontsize=fs)
# thin in-pointing ticks/spines everywhere
ax0.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
ax1.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax1.spines[axis].set_linewidth(lw)
ax2.tick_params(width=lw,which='both',direction='in')
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(lw)
c0.ax.tick_params(width=lw,which='both',direction='in')
c0.outline.set_linewidth(lw)
c1.ax.tick_params(width=lw,which='both',direction='in')
c1.outline.set_linewidth(lw)
fig.set_size_inches(190/25.4, 142.5/25.4/3, forward=True)
plt.savefig('figures/fig4.pdf', dpi=300)
# -

# # Supplementary Figures

# ## Figure S1

# +
# Figure S1: direction distributions (flux and local dune orientation) from
# every instrument, binned every `da` degrees
da = 5
mAf = np.arange(-180,180+da,da)
mAmidf = mAf[1:]-da/2
fig = plt.gcf()
ax0 = plt.subplot(111)
# D = DEM-derived lidar surveys, M = met towers, L = lidars (labels a/b/c)
plt.plot(mAmidf,pq1_l1,lw=lw,c='k',label='$D^{a}$')
plt.plot(mAmidf,pq1_l2,lw=lw,c='grey',label='$D^{b}$')
plt.plot(mAmidf,pq2a,lw=lw,c='c',label='$M^{a}$')
plt.plot(mAmidf,pq2b,lw=lw,c='m',label='$M^{b}$')
plt.plot(mAmidf,pq2c,lw=lw,c='y',label='$M^{c}$')
plt.plot(ppd_bins-af0,pdd_jan09,lw=lw,c='r',label='$L^{a}$')
plt.plot(ppd_bins-af0,pdd_sep09,lw=lw,c='g',label='$L^{b}$')
plt.plot(ppd_bins-af0,pdd_jun10,lw=lw,c='b',label='$L^{c}$')
plt.plot([0,0],[0,0.2],'--k',lw=lw,label='$\\theta_{d}$') # mean downwind direction
plt.xlim(-180,180)
plt.ylim(0,0.2)
plt.xticks([-180,-90,0,90,180],['$\\theta_{d}-180$','$\\theta_{d}-90$','$\\theta_{d}$','$\\theta_{d}+90$','$\\theta_{d}+180$'],fontsize=fs)
plt.yticks([0,0.1,0.2],fontsize=fs)
plt.ylabel('$P$',fontsize=fs)
plt.xlabel('$\\theta_q,\\theta_{d,local}$ ($^{\\circ}$)',fontsize=fs)
plt.legend(frameon=False,handlelength=lw*2,fontsize=fs)
fig.subplots_adjust(bottom=0.18)
fig.subplots_adjust(top=0.96)
fig.subplots_adjust(right=0.95)
fig.subplots_adjust(left=0.1)
ax0.tick_params(width=lw)
for axis in ['top','bottom','left','right']:
    ax0.spines[axis].set_linewidth(lw)
fig.set_size_inches(142.5/25.4, 142.5/25.4/2, forward=True)
plt.savefig('figures/figS1.pdf', dpi=300)
# -

# ## Figure S2

# +
# Figure S2 setup: a DS x DS analysis window in map coordinates, plus the
# same window rotated into downwind coordinates (rotation angle af0)
Xi2 = 3.81e+5
Xf2 = Xi2 + DS
Yi2 = 3.634e+6
Yf2 = Yi2 + DS
Xi2r = (Xi2+DS/2)*np.cos(af0*np.pi/180)+(Yi2+DS/2)*np.sin(af0*np.pi/180) - DS/2
Xf2r = Xi2r+DS
Yi2r = -(Xi2+DS/2)*np.sin(af0*np.pi/180)+(Yi2+DS/2)*np.cos(af0*np.pi/180) - DS/2
Yf2r = Yi2r+DS
# angle histogram bins (half-degree midpoints spanning -360..360)
da = 1/2
abins = np.arange(-360-da,360+2*da,2*da)
abinmids = abins[1:] - da
# Jan09 DEM in map coordinates
ds = gdal.Open(jan09, gdal.GA_ReadOnly)
rb = ds.GetRasterBand(1)
metaxy = ds.GetGeoTransform()
z_jan09 = np.flip(rb.ReadAsArray(),axis=0) # flip so y increases with row index
x = metaxy[0]+metaxy[1]*np.arange(0,np.shape(z_jan09)[1])
y = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(z_jan09)[0]))
X,Y = np.meshgrid(x,y)
# Jan09 DEM rotated into downwind coordinates
ds = gdal.Open(jan09r, gdal.GA_ReadOnly)
rb = ds.GetRasterBand(1)
metaxy = ds.GetGeoTransform()
zr_jan09 = np.flip(rb.ReadAsArray(),axis=0)
zr_jan09[zr_jan09<0] = np.nan # negative values mark no-data in the rotated DEM
xr = metaxy[0]+metaxy[1]*np.arange(0,np.shape(zr_jan09)[1])
yr = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(zr_jan09)[0]))
Xr,Yr = np.meshgrid(xr,yr)
# index bounds of the analysis window in both coordinate systems
xi2 = np.argmin(np.abs(x-Xi2))
xf2 = np.argmin(np.abs(x-Xf2))
yi2 = np.argmin(np.abs(y-Yi2))
yf2 = np.argmin(np.abs(y-Yf2))
xi2r = np.argmin(np.abs(xr-Xi2r))
xf2r = np.argmin(np.abs(xr-Xf2r))
yi2r = np.argmin(np.abs(yr-Yi2r))
yf2r = np.argmin(np.abs(yr-Yf2r))
ae_jan09 = ae(z_jan09) # slipface-angle field (helper defined elsewhere in the file)
dzdy_jan09 = np.gradient(z_jan09,axis=0) # hillshade-like background
with open('DEMs/qs0.pkl', 'rb') as file:
    qs0 = pickle.load(file) # precomputed flux field
tempq = qs0[int((yi2r+yf2r)/2),xi2r:xf2r] # flux along the window's mid row
# elevation-change rate from the first DEM pair, corrected for registration
dzdt0c = demcorrect(dems[:-1],[dempairs[0]],demdts[:-1])
dzdt0c = np.squeeze(dzdt0c)
# slipface-angle histogram within the window; the autocorrelation peak of the
# cleaned distribution gives the local dune-normal (hence local wind) direction
temp = ae_jan09[yi2:yf2,xi2:xf2]
tempa = temp[~np.isnan(temp)]
acnt,_ = np.histogram(tempa,bins=abins)
tempc = ridspuriousangles(acnt,abinmids)
p = correlate(tempc,np.flip(tempc),mode='same')
tempdd = abinmids[np.argmax(p)]/2 # local downwind direction estimate (degrees)

# +
# Figure S2: (a) slipface angles over the window with the inferred local
# direction, (b) angle distributions, (c) elevation-change rate, (d) flux
d0 = 2.5e+2 # arrow origin offset within the window (m)
d1 = 2e+2 # arrow length (m)
dzdtm = 5 # color limit for d(eta)/dt (m/yr)
lw = 0.5
fig = plt.gcf()
ax1 = plt.subplot(221)
s0 = ax1.pcolormesh(X[yi2:yf2,xi2:xf2],Y[yi2:yf2,xi2:xf2],dzdy_jan09[yi2:yf2,xi2:xf2],rasterized=True,cmap='binary')
s1 = ax1.pcolormesh(X[yi2:yf2,xi2:xf2],Y[yi2:yf2,xi2:xf2],ae_jan09[yi2:yf2,xi2:xf2],vmin=-90,vmax=90,rasterized=True)
# dashed arrow showing the inferred local downwind direction
ax1.plot([Xi2+d0,Xi2+d0+d1*np.cos(tempdd*np.pi/180)],[Yi2+d0,Yi2+d0+d1*np.sin(tempdd*np.pi/180)],'--m',lw=lw)
ax1.axis('equal')
plt.xticks([],[])
plt.yticks([],[])
ax1.set_xlabel('$x$',fontsize=fs)
ax1.set_ylabel('$y$',fontsize=fs)
# panel c: elevation-change rate in rotated coordinates with the mid-row marked
ax2 = plt.subplot(223)
s2 = ax2.pcolormesh(Xr[yi2r:yf2r,xi2r:xf2r],Yr[yi2r:yf2r,xi2r:xf2r],dzdt0c[yi2r:yf2r,xi2r:xf2r]/s2y,vmin=-dzdtm,vmax=dzdtm,rasterized=True,cmap='seismic')
ax2.plot([Xi2r,Xf2r],[(Yi2r+Yf2r)/2,(Yi2r+Yf2r)/2],'c',lw=lw)
ax2.axis('equal')
plt.xticks([],[])
plt.yticks([],[])
ax2.set_xlabel('$x_d$',fontsize=fs)
ax2.set_ylabel('$y_d$',fontsize=fs)
# panel b: slipface-angle distribution and its autocorrelation
ax3 = plt.subplot(222)
s3 = ax3.plot(abinmids,acnt/np.sum(acnt),c='y',lw=lw,label='$P(\\theta_{sf})$')
ax3.plot(abinmids/2,p/np.sum(p),c='m',lw=lw,label='$P(\\theta_{sf})\\star P(-\\theta_{sf})$') ax3.plot([tempdd,tempdd],[0,1.2*np.max(acnt/np.sum(acnt))],'-.m',lw=lw,label='$\\theta_{d,local}$') ax3.plot([af0,af0],[0,1.2*np.max(acnt/np.sum(acnt))],'--k',lw=lw,label='$\\theta_{d}$') plt.legend(frameon=False,handlelength=lw*2,fontsize=fs) plt.text(0,0,'a b c d',fontsize=12) ax3.set_xlim(-180,180) ax3.set_ylim(0,np.max(acnt/np.sum(acnt))) ax3.yaxis.set_label_position("right") ax3.yaxis.tick_right() plt.xticks([-180,-90,0,90,180],['$-180$','$-90$','$0$','$90$','$180$'],fontsize=fs) plt.yticks([0,0.01,0.02],fontsize=fs) ax3.set_xlabel('$\\theta_{sf}$ ($^{\\circ}$)',fontsize=fs) ax3.set_ylabel('$P$',fontsize=fs) ax4 = plt.subplot(224) ax4.plot(xr[xi2r:xf2r]-xr[xi2r]+20,tempq/s2y,c='c',lw=lw) ax4.set_xlim(0,5e+2) ax4.set_ylim(0,20) ax4.yaxis.set_label_position("right") ax4.yaxis.tick_right() plt.xticks([0,250,500],fontsize=fs) plt.yticks([0,10,20],fontsize=fs) ax4.set_xlabel('$x_d$ (m)',fontsize=fs) ax4.set_ylabel('$q_s$ (m$^2$/yr)',fontsize=fs) fig.subplots_adjust(wspace=0.15) fig.subplots_adjust(hspace=0.25) fig.subplots_adjust(bottom=0.09) fig.subplots_adjust(top=0.97) fig.subplots_adjust(right=0.91) fig.subplots_adjust(left=0.16) cbar_ax1 = fig.add_axes([0.1, 0.58, 0.01, 0.39]) c1 = fig.colorbar(s1, cax=cbar_ax1, ticks=[-90,0,90]) cbar_ax1.yaxis.set_label_position('left') cbar_ax1.yaxis.set_ticks_position('left') c1.set_label('$\\theta_{sf}$ ($^{\\circ}$)',fontsize=fs) c1.ax.set_yticklabels(['-90','0','90'],fontsize=fs) cbar_ax2 = fig.add_axes([0.1, 0.09, 0.01, 0.39]) c2 = fig.colorbar(s2, cax=cbar_ax2, ticks=[-5,0,5]) cbar_ax2.yaxis.set_label_position('left') cbar_ax2.yaxis.set_ticks_position('left') c2.set_label('$d\\eta/dt$ (m/yr)',fontsize=fs) c2.ax.set_yticklabels(['-5','0','5'],fontsize=fs) ax1.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax1.spines[axis].set_linewidth(lw) ax2.tick_params(width=lw) for axis in 
['top','bottom','left','right']: ax2.spines[axis].set_linewidth(lw) ax3.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax3.spines[axis].set_linewidth(lw) ax4.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax4.spines[axis].set_linewidth(lw) c1.ax.tick_params(width=lw) c1.outline.set_linewidth(lw) c2.ax.tick_params(width=lw) c2.outline.set_linewidth(lw) fig.set_size_inches(142.5/25.4, 142.5/25.4/1.12,forward=True) plt.savefig('figures/figS2.pdf',dpi=300) # - # ## Figure S3 # + ds = gdal.Open(jun10r, gdal.GA_ReadOnly) metaxy = ds.GetGeoTransform() with open('DEMs/qs0.pkl', 'rb') as file: qs0 = pickle.load(file) xr = metaxy[0]+metaxy[1]*np.arange(0,np.shape(qs0)[1]) yr = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(qs0)[0])) xr = metaxy[0]+metaxy[1]*np.arange(0,np.shape(qs0)[1]) yr = np.flip(metaxy[3]+metaxy[5]*np.arange(0,np.shape(qs0)[0])) Xr,Yr = np.meshgrid(xr,yr) # + lw = 0.5 fig = plt.gcf() ax0 = plt.subplot(111) s = plt.pcolormesh(Yr*m2km,Xr*m2km,qs0/s2y,cmap='plasma',vmin=0,vmax=25,rasterized=True) plt.plot([np.min(Yr[~np.isnan(qs0)]*m2km),np.min(Yr[~np.isnan(qs0)]*m2km)],[np.min(Xr[~np.isnan(qs0)]*m2km)+0.5,np.min(Xr[~np.isnan(qs0)]*m2km)+1+0.5],'k',lw=2*lw) plt.text(np.min(Yr[~np.isnan(qs0)]*m2km)+0.05,np.min(Xr[~np.isnan(qs0)]*m2km)+0.5+0.5,'1 km',fontsize=fs) ax0.axis('equal') plt.ylabel('$x_d$ (km)',fontsize=fs) plt.xlabel('$y_d$ (km)',fontsize=fs) plt.ylim(np.min(Xr[~np.isnan(qs0)]*m2km),np.max(Xr[~np.isnan(qs0)]*m2km)) plt.xlim(np.min(Yr[~np.isnan(qs0)]*m2km),np.max(Yr[~np.isnan(qs0)]*m2km)) plt.axis('off') fig.subplots_adjust(bottom=0.01) fig.subplots_adjust(top=0.99) fig.subplots_adjust(right=0.95) fig.subplots_adjust(left=0.05) cbar_ax = fig.add_axes([0.85, 0.5, 0.01, 0.2]) c = plt.colorbar(s, cax=cbar_ax, ticks=[0, 10, 20]) c.set_label('$q_s$ (m$^2$/yr)',fontsize=fs) c.ax.set_yticklabels(['0','10','20'],fontsize=fs) c.ax.tick_params(width=lw) c.outline.set_linewidth(lw) fig.set_size_inches(190/25.4, 
230/25.4, forward=True) plt.savefig('figures/figS3.pdf',dpi=300) # - # ## Figure S4 # + c=cm.viridis_r(np.linspace(0,1,Nz0)) lw=0.5 Np = 3 z0_i = np.linspace(0.2,0.8,Np)*Nz0 S = 10 A = 0.5 fig = plt.gcf() ax0 = plt.subplot(131) for i in z0_i.astype(int): for j in z0_i.astype(int): if i==j: continue else: ax0.plot(X1[i,j,:]*m2km,dT[i,j,:],c=c[i],lw=lw*1.5) plt.xscale('log') plt.xlim(1e-1,1e+3) plt.ylim(-30,30) plt.xticks(fontsize=fs) plt.yticks([-30,0,30],fontsize=fs) plt.xlabel('$S_{d}$ (km)',fontsize=fs) plt.ylabel('$\\theta_{10}-\\theta_{out}$ ($^{\\circ}$)',fontsize=fs) plt.text(1e-1,3,'a b c',fontsize=12) ax1 = plt.subplot(132) s1 = ax1.pcolormesh(Z0_0,Z0_1,Asat*180/np.pi,cmap='seismic',vmin=-45,vmax=45,rasterized=True) ax2.scatter(z00,z01,c='r',s=S,lw=0) plt.xscale('log') plt.yscale('log') plt.xlim(np.min(Z0_0),np.max(Z0_0)) plt.ylim(np.min(Z0_1),np.max(Z0_1)) plt.xticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs) plt.yticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs) plt.xlabel('$z_{0,out}$ (m)',fontsize=fs) plt.ylabel('$z_{0,in}$ (m)',fontsize=fs) ax2 = plt.subplot(133) s2 = ax2.pcolormesh(Z0_0,Z0_1,np.log10(Usat),cmap='magma',vmin=-0.5,vmax=0.5,rasterized=True) plt.xscale('log') plt.yscale('log') plt.xlim(np.min(Z0_0),np.max(Z0_0)) plt.ylim(np.min(Z0_1),np.max(Z0_1)) plt.xticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs) plt.yticks([1e-5,10**-2.5,1e+0],['$10^{-5}$','$10^{-2.5}$','$10^{0}$'],fontsize=fs) plt.xlabel('$z_{0,out}$ (m)',fontsize=fs) plt.ylabel('$z_{0,in}$ (m)',fontsize=fs) fig.subplots_adjust(wspace=0.75) fig.subplots_adjust(bottom=0.3) fig.subplots_adjust(top=0.93) fig.subplots_adjust(left=0.19) cbar_ax = fig.add_axes([0.09, 0.3, 0.01, 0.63]) c0 = fig.colorbar(s1, cax=cbar_ax, ticks=[-45,0,45]) cbar_ax.yaxis.set_label_position('left') cbar_ax.yaxis.set_ticks_position('left') c0.set_label('$\\theta_{sat}-\\theta_{out}$ ($^{\\circ}$)',fontsize=fs) 
c0.ax.set_yticklabels(['-45','0','45'],fontsize=fs) fig.subplots_adjust(right=0.85) cbar_ax = fig.add_axes([0.88, 0.3, 0.01, 0.63]) c1 = fig.colorbar(s2, cax=cbar_ax, ticks=[-0.5,0,0.5]) c1.set_label('$U_{sat}/U_{out}$ ',fontsize=fs) c1.ax.set_yticklabels(['$10^{-0.5}$','$10^{0}$','$10^{0.5}$'],fontsize=fs) ax0.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax0.spines[axis].set_linewidth(lw) ax1.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax1.spines[axis].set_linewidth(lw) ax2.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax2.spines[axis].set_linewidth(lw) c0.ax.tick_params(width=lw) c0.outline.set_linewidth(lw) c1.ax.tick_params(width=lw) c1.outline.set_linewidth(lw) fig.set_size_inches(142.5/25.4, 142.5/25.4/4.4, forward=True) plt.savefig('figures/figS4.pdf', dpi=300) # + A = 1 S = 20 fig = plt.gcf() ax0 = plt.subplot(131) ax0.plot(X_t*m2km,Q_t/s2y,'m',lw=lw*1.5) ax0.plot(X_t*m2km,Q_t_SR/s2y,'c',lw=lw*1.5) ax0.plot(X_t*m2km,Q_t_DD/s2y,'y',lw=lw*1.5) ax0.plot(sr*m2km,qavg[2]/s2y,c='k',lw=lw,label='DTM') plt.text(10,0,'a b c',fontsize=12) plt.xlabel('$S_d$ (km)',fontsize=fs) plt.ylabel('$q_s$ (m$^2$/s)',fontsize=fs) plt.xticks(fontsize=fs) plt.yticks(fontsize=fs) plt.xlim(-1.5,10) plt.ylim(0,20) plt.xticks([0,5,10],fontsize=fs) plt.yticks([0,10,20],fontsize=fs) plt.legend(frameon=False,handlelength=lw*2,fontsize=fs) ax1 = plt.subplot(132) ax1.plot(X_t*m2km,U_t,'m',lw=lw*1.5) ax1.plot(X_t*m2km,U_t_SR,'c',lw=lw*1.5) ax1.plot(X_t*m2km,U_t_DD,'y',lw=lw*1.5) ax1.scatter([s_a_o*m2km,s_b_o*m2km,s_c_o*m2km],[np.mean(u2aca),np.mean(u2bca),np.mean(u2cca)],c='k',marker='v',s=S,alpha=A,lw=0,label='Met') ax1.plot([s_a_o*m2km,s_a_o*m2km],[np.percentile(u2aca,45),np.percentile(u2aca,55)],c='k',alpha=A,lw=lw) ax1.plot([s_b_o*m2km,s_b_o*m2km],[np.percentile(u2bca,45),np.percentile(u2bca,55)],c='k',alpha=A,lw=lw) ax1.plot([s_c_o*m2km,s_c_o*m2km],[np.percentile(u2cca,45),np.percentile(u2cca,55)],c='k',alpha=A,lw=lw) 
plt.xlabel('$S_d$ (km)',fontsize=fs) plt.ylabel('$U_{10}$ (m/s)',fontsize=fs) plt.xticks(fontsize=fs) plt.yticks(fontsize=fs) plt.legend(frameon=False,handlelength=lw*2,fontsize=fs) plt.xlim(-1.5,10) plt.ylim(5,7) plt.xticks([0,5,10],fontsize=fs) plt.yticks([5,6,7],fontsize=fs) ax2 = plt.subplot(133) ax2.plot(U0a-U0b,U0a-U0c,'m',lw=lw*1.5,label='Basic') ax2.plot(U0a_DD-U0b_DD,U0a_DD-U0c_DD,'y',lw=lw*1.5,label='$\\alpha$(Data)') ax2.plot(U0a_SR-U0b_SR,U0a_SR-U0c_SR,'c',lw=lw*1.5,label='$z_0=\sigma_\eta/10$') hist = np.histogram2d(u0aca-u0bca,u0aca-u0cca,bins=[np.linspace(-6,6,40),np.linspace(-6,6,40)]) hist[0][hist[0]==0] = np.nan s0 = plt.pcolormesh(hist[1],hist[2],hist[0].T,rasterized=True,vmin=0,vmax=20,cmap='copper') plt.xlabel('$U_{2}^a-U_{2}^b$ (m/s)',fontsize=fs) plt.ylabel('$U_{2}^a-U_{2}^c$ (m/s)',fontsize=fs) plt.xlim(-7,7) plt.ylim(-7,7) plt.gca().set_aspect('equal') plt.xticks([-7,0,7],fontsize=fs) plt.yticks([-7,0,7],fontsize=fs) plt.legend(frameon=False,handlelength=lw*2,fontsize=fs*0.6,loc=3) fig.subplots_adjust(wspace=0.55) fig.subplots_adjust(bottom=0.28) fig.subplots_adjust(right=0.97) fig.subplots_adjust(top=0.95) fig.subplots_adjust(left=0.19) cbar_ax = fig.add_axes([0.08, 0.3, 0.01, 0.65]) c0 = fig.colorbar(s0, cax=cbar_ax, ticks=[0,10,20]) cbar_ax.yaxis.set_label_position('left') cbar_ax.yaxis.set_ticks_position('left') c0.set_label('$n$',fontsize=fs) c0.ax.set_yticklabels(['$0$','$10$','$20$'],fontsize=fs) ax0.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax0.spines[axis].set_linewidth(lw) ax1.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax1.spines[axis].set_linewidth(lw) ax2.tick_params(width=lw) for axis in ['top','bottom','left','right']: ax2.spines[axis].set_linewidth(lw) c0.ax.tick_params(width=lw) c0.outline.set_linewidth(lw) fig.set_size_inches(142.5/25.4, 142.5/25.4/3.7, forward=True) plt.savefig('figures/figS5.pdf', dpi=300)
paper-figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: selfresearch
#     language: python
#     name: selfresearch
# ---

# +
# Explore what goes into BERT-style models: inputs and sequence length,
# using the NSMC (Naver sentiment movie corpus) review dataset.
""" BERT 입력으로 뭐가 들어가는지 BERT seq_len 으로 뭐가 들어가야하는지 """
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import urllib.request
import json
import sys
sys.path.append('..')
# -

# Load the pre-split NSMC review files (space-separated, with headers).
train_data = pd.read_table('data/train_nsmc.txt', sep=" ")
val_data = pd.read_table('data/val_nsmc.txt', sep=" ")
test_data = pd.read_table('data/test_nsmc.txt',sep=" ")
""" 데이터 확인 """
train_data[:5]
""" 데이터 정제 """
print('훈련용 리뷰 개수 :',len(train_data))
train_data['document'].nunique(), train_data['label'].nunique()
train_data.drop_duplicates(subset=['document'], inplace=True)  # drop duplicate reviews
print(train_data.isnull().values.any())
print(train_data.isnull().sum())
train_data.loc[train_data.document.isnull()]
train_data = train_data.dropna(how = 'any')  # drop rows containing NaN
print(train_data.isnull().values.any())  # check whether any NaN remains
# Keep only Korean characters, digits and spaces.
# regex=True is explicit: under pandas >= 2.0 str.replace defaults to
# regex=False, which would treat the character class as a literal string.
train_data['document'] = train_data['document'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣0-9 ]","",regex=True)
train_data[:5]
train_data['document'].replace('', np.nan, inplace=True)  # empty strings become NaN
print(train_data.isnull().sum())
train_data = train_data.dropna(how = 'any')
print(len(train_data))
train_data['label'][:5]

from sklearn.model_selection import train_test_split

# Apply the same cleaning to the test split.
test_data.drop_duplicates(subset = ['document'], inplace=True)  # drop duplicate reviews
test_data['document'] = test_data['document'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣0-9 ]","",regex=True)  # apply the regex
test_data['document'].replace('', np.nan, inplace=True)  # empty strings become NaN
test_data = test_data.dropna(how='any')  # drop NaN rows
print('전처리 후 테스트용 샘플의 개수 :',len(test_data))

# Stratified 80/20 split of the training data into train/validation.
train, val = train_test_split(train_data,train_size = 0.8, test_size = 0.2, stratify=train_data['label'])
train[:5]
val[:5]
train.groupby('label').size()
val.groupby('label').size()
# NOTE(review): this overwrites the original input files with the new splits —
# confirm that is intended.
train.to_csv("data/train_nsmc.txt",sep = " ")
val.to_csv("data/val_nsmc.txt",sep=" ")
# BUGFIX: was `test_data("data/test_nsmc.txt",sep=" ")`, which calls the
# DataFrame object itself and raises TypeError.
test_data.to_csv("data/test_nsmc.txt",sep=" ")
test_data.groupby('label').size()

from transformers import ElectraModel, ElectraTokenizer

tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-v3-discriminator")
model = ElectraModel.from_pretrained("monologg/koelectra-base-v3-discriminator")
# BUGFIX: the tokenizer kwarg is `max_length`, not `max_len`; the unknown
# kwarg was silently ignored.
a=tokenizer("안녕하세요 여러분 저는 영윤이입니다. 반가워요 여러분",return_tensors = "pt",max_length=128)
model(**a)
max_len = 128
# NOTE(review): the cells below treat `a` like a plain list of token ids
# (decode/len/extend); with return_tensors="pt" `a` is a BatchEncoding, so
# `a.extend(...)` would fail — these look like leftover scratch cells.
tokenizer.decode(a)
len(a)
a.extend(([1])*(max_len-len(a)))  # pad with 1s up to max_len
a
len(a)

def prepare_with_json(infile, outfile):
    """Write one JSON object per row of `infile` (a DataFrame) to `outfile`.

    Each line holds the row's id, raw document text and label.
    """
    with open(outfile, 'w') as f:
        for index, row in infile.iterrows():
            # NOTE(review): `doc` (token ids) is computed but the raw text is
            # written instead — confirm whether "doc" should be the ids.
            doc = tokenizer.encode(row["document"])
            instance = {"id": row["id"], "doc":row["document"], "label":row["label"]}
            f.write(json.dumps(instance))
            f.write("\n")

prepare_with_json(train_data, "data/ratings_train.json")
prepare_with_json(val_data, "data/rating_val.json")
prepare_with_json(test_data, "data/ratings_test.json")
train_data
stock_classifier/data_getter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # Pull highway data from OpenStreetMap as shapefile
# This notebook pulls highway line data from the OpenStreetMap database and creates a shapefile containing the query results.

# #### Required packages
# <a href="https://github.com/DinoTools/python-overpy">overpy</a> <br />
# <a href="https://github.com/Toblerity/Fiona">Fiona</a>

# #### Variable settings
# bounding_box_filepath — path to a shapefile that defines the desired bounding box to query the OpenStreetMap database <br />
# result_shapefile_filepath — path to export shapefile containing the query results

bounding_box_file = ""
result_shapefile_filepath = ""

# #### Import statements

import overpy
import fiona

# #### Utility functions

# function to see what results were returned from the Overpass API query
def print_results(results):
    """Print the name, highway type and node coordinates of each way in `results`."""
    # BUGFIX: was `for way in result.ways:` — the function iterated the module
    # global `result` and silently ignored its own parameter.
    for way in results.ways:
        print("Name: %s" % way.tags.get("name", "n/a"))
        print(" Highway: %s" % way.tags.get("highway", "n/a"))
        print(" Nodes:")
        for node in way.nodes:
            print(" Lat: %f, Lon: %f" % (node.lat, node.lon))

# #### Query OpenStreetMap using OverpassAPI via overpy python package

# setup Overpass api
api = overpy.Overpass()

# define bounding box from a 1km-buffered envelope around the study area boundary
with fiona.open(bounding_box_file, mode='r') as bounding_box:
    bounds = bounding_box.bounds
bounding_box.close()  # redundant — the `with` block already closed the file
print(bounds)

# define query (all ways tagged "highway" inside the bounding box, plus their nodes)
query = """way({bottom},{left},{top},{right}) ["highway"]; (._;>;); out body;""".format(bottom=bounds[1], left=bounds[0], top=bounds[3], right=bounds[2])

# execute query
result = api.query(query)

# #### Write OpenStreetMap data to a shapefile

from fiona.crs import from_epsg

schema = {'geometry': 'LineString', 'properties': {'Name':'str:80', 'Type':'str:80'}}
with fiona.open(result_shapefile_filepath, 'w', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as output:
    for way in result.ways:
        # the shapefile geometry use (lon,lat)
        line = {'type': 'LineString', 'coordinates':[(node.lon, node.lat) for node in way.nodes]}
        prop = {'Name': way.tags.get("name", "n/a"), 'Type': way.tags.get("highway", "n/a")}
        output.write({'geometry': line, 'properties':prop})
output.close()  # redundant — the `with` block already closed the collection
utilities/Pull data from OpenStreetMap.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Average Directional Index (ADX)

# https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx

# Average Directional Index (ADX) is technical indicator; as a result, the values range from 0 to 100. The ADX gives a signal of trend strength.
#
# If ADX is below 20, the trend is weak; however, if ADX is above 50, the trend is strong. ADX does not tell you the direction of the trend and it only gives the strength of the trend.

# + outputHidden=false inputHidden=false
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")

# yfinance is used to fetch data
import yfinance as yf
yf.pdr_override()

# + outputHidden=false inputHidden=false
# input: ticker symbol and date range to download
symbol = 'AAPL'
start = '2018-08-01'
end = '2018-12-31'

# Read data (daily OHLCV bars from Yahoo Finance)
df = yf.download(symbol,start,end)

# View Columns
df.head()

# + outputHidden=false inputHidden=false
# Simple Line Chart of the adjusted close
plt.figure(figsize=(14,10))
plt.plot(df['Adj Close'])
plt.legend(loc='best')
plt.title('Stock '+ symbol +' Closing Price')
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()

# + outputHidden=false inputHidden=false
import talib as ta

# + outputHidden=false inputHidden=false
# 14-period ADX from high/low/adjusted-close; the first 2*period-1 rows are
# NaN and are dropped.
adx = ta.ADX(df['High'], df['Low'],df['Adj Close'], timeperiod=14)
adx = adx.dropna()
adx

# + outputHidden=false inputHidden=false
# Line Chart: price (top) and ADX with the 20/50 weak/strong guides (bottom)
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(2, 1, 1)
ax1.plot(df['Adj Close'])
#ax1.grid(True, which='both')
ax1.grid(which='minor', linestyle='-', linewidth='0.5', color='black')
ax1.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax1.minorticks_on()
ax1.legend(loc='best')
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')

ax2 = plt.subplot(2, 1, 2)
ax2.plot(adx, '-', label='ADX')
ax2.text(s='Strong Trend', x=adx.index[0], y=50, fontsize=14)
ax2.text(s='Weak Trend', x=adx.index[0], y=20, fontsize=14)
ax2.axhline(y=50,color='r')
ax2.axhline(y=20,color='r')
ax2.set_xlabel('Date')
ax2.legend(loc='best')
# -

# ## Candlestick with ADX

# + outputHidden=false inputHidden=false
# Candlestick: work on a copy so `df` keeps its DatetimeIndex
dfc = df.copy()
from matplotlib import dates as mdates
import datetime as dt
dfc['ADX'] = ta.ADX(dfc['High'], dfc['Low'],dfc['Adj Close'], timeperiod=14)
dfc = dfc.dropna()
dfc.head()

# + outputHidden=false inputHidden=false
# candlestick_ohlc needs the date as a matplotlib float date in the first column
dfc = dfc.reset_index()
dfc['Date'] = mdates.date2num(dfc['Date'].astype(dt.date))
dfc.head()

# + outputHidden=false inputHidden=false
# NOTE(review): mpl_finance is deprecated upstream (superseded by mplfinance).
from mpl_finance import candlestick_ohlc

fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(2, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
#ax1.grid(which='minor', linestyle='-', linewidth='0.5', color='black')
#ax1.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax1.minorticks_on()
#ax1.legend(loc='best')
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')

ax2 = plt.subplot(2, 1, 2)
ax2.plot(adx, '-', label='ADX')
ax2.text(s='Strong Trend', x=adx.index[0], y=50, fontsize=14)
ax2.text(s='Weak Trend', x=adx.index[0], y=20, fontsize=14)
ax2.axhline(y=50,color='r')
ax2.axhline(y=20,color='r')
ax2.set_xlabel('Date')
ax2.legend(loc='best')

# + outputHidden=false inputHidden=false
# Same chart with a volume overlay on a twin y-axis (green = up day)
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(2, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
ax1v = ax1.twinx()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
# scale the volume axis so bars stay in the lower third of the panel
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')

ax2 = plt.subplot(2, 1, 2)
ax2.plot(adx, '-', label='ADX')
ax2.text(s='Strong Trend', x=adx.index[0], y=50, fontsize=14)
ax2.text(s='Weak Trend', x=adx.index[0], y=20, fontsize=14)
ax2.axhline(y=50,color='r')
ax2.axhline(y=20,color='r')
ax2.set_xlabel('Date')
ax2.legend(loc='best')
Python_Stock/Technical_Indicators/ADX.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from agc_optims.optim import SGD_AGC, Adam_AGC, AdamW_AGC, RMSprop_AGC
from torch.optim import SGD, Adam, AdamW, RMSprop
from agc_optims.clipper import AGC
from time import time

""" This example was taken from: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html You can change the optimizer and test on the CIFAR 10 dataset """

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Normalize with CIFAR-10 per-channel mean/std.
transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])

batch_size = 256

# CIFAR-10 train/test loaders (downloads on first run).
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

class Net(nn.Module):
    """Small LeNet-style CNN: two conv/pool stages then two linear layers.

    Input is a (N, 3, 32, 32) CIFAR batch; output is (N, 10) class logits.
    """
    def __init__(self) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 16 channels * 5 * 5 spatial after two conv+pool stages on 32x32 input
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1) # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

net = Net().to(device=device)
criterion = nn.CrossEntropyLoss()

""" Change the optimizer to test AGC """
# Base optimizer wrapped by the Adaptive-Gradient-Clipping clipper.
optimizer = Adam(net.parameters(), lr=0.001)
optimizer = AGC(optimizer=optimizer, clipping=0.16)

for epoch in range(5):
    # NOTE(review): running_loss is initialized but never accumulated or
    # printed — dead variable, probably a leftover from the tutorial.
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs = inputs.to(device=device)
        labels = labels.to(device=device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
    # Evaluate test accuracy after each epoch.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            images = images.to(device=device)
            labels = labels.to(device=device)
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # (the f-string resolves first, then the %-formatting fills in %d)
    print(f'Epoch {epoch + 1}: Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
examples/cifar_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:deeprl]
#     language: python
#     name: conda-env-deeprl-py
# ---

# A3C (asynchronous advantage actor-critic) on CartPole-v1 using
# torch.multiprocessing: several worker processes update a shared model.
import torch
from torch import nn
from torch import optim
import numpy as np
from torch.nn import functional as F
import gym
import matplotlib.pyplot as plt
from skimage.transform import resize
from collections import deque
from IPython.display import clear_output, display
import torch.multiprocessing as mp

#env = gym.make("Pong-v0")
env = gym.make("CartPole-v1")
env.reset()
#env.unwrapped.get_action_meanings()

# (commented-out scratch cell: random-action environment smoke test)
'''#Test environment with random actions env.reset() #actions = [0,2,3] for i in range(2000): env.render() a = env.action_space.sample() #a = np.random.choice(actions) state, reward, done, info = env.step(a) if done: env.reset() env.close()'''

#env.reset()

'''state,reward,done,lives=env.step(2) plt.imshow(downscale_obs(state, new_size=(42,42))) print(reward,done)'''

# +
#t1=prepare_initial_state(env.render('rgb_array'),3)
#t2=prepare_multi_state(t,env.render('rgb_array')).shape
#t1.shape,t2.shape
# -

class ActorCritic(nn.Module):
    """Shared-trunk actor-critic for CartPole's 4-dim state.

    forward() returns (log-softmax action logits over 2 actions, critic value
    in [-1, 1]); the critic branch detaches the trunk so its loss does not
    backprop into the shared layers.
    """
    def __init__(self):
        super(ActorCritic, self).__init__()
        self.l1 = nn.Linear(4,25)
        self.l2 = nn.Linear(25,50)
        self.actor_lin1 = nn.Linear(50,2)
        self.l3 = nn.Linear(50,25)
        self.critic_lin1 = nn.Linear(25,1)
    def forward(self,x):
        x = F.normalize(x,dim=0)
        y = F.relu(self.l1(x))
        y = F.relu(self.l2(y))
        actor = F.log_softmax(self.actor_lin1(y),dim=0)
        c = F.relu(self.l3(y.detach()))  # detach: critic loss must not reach the trunk
        critic = torch.tanh(self.critic_lin1(c))
        return actor, critic

'''x =np.arange(start=10,stop=0,step=-1) print(x) gamma = 0.9 x[0]=-20 G = [] g = x[0] G.append(g) print(g) for i in range(len(x)-1): g = g + gamma * x[i+1] G.append(g) G'''

'''TestModel = ActorCritic() state=env.step(1)[0] a,_ = TestModel(prepare_initial_state(state))'''

import time

# +
def update_params(worker_opt,values,logprobs,rewards,clc=0.1,gamma=0.95):
    """One A3C update: discounted returns, actor/critic losses, backprop, step.

    values/logprobs are per-step tensors from the episode; rewards is a plain
    list. Returns (actor_loss, critic_loss, episode length).
    """
    # Reverse the trajectories so returns can be accumulated from the end.
    rewards = torch.Tensor(rewards).flip(dims=(0,)).view(-1)
    logprobs = torch.stack(logprobs).flip(dims=(0,)).view(-1) #to Tensor and reverse
    values = torch.stack(values).flip(dims=(0,)).view(-1) #to Tensor and reverse
    Returns = []
    ret_ = torch.Tensor([0])#rewards_[0]
    #Ret.append(ret_)
    for r in range(rewards.shape[0]):
        ret_ = rewards[r] + gamma * ret_
        Returns.append(ret_)
    Returns = torch.stack(Returns).view(-1)
    Returns = F.normalize(Returns,dim=0)
    # Advantage uses detached values so the actor loss does not train the critic.
    actor_loss = -1*logprobs * (Returns - values.detach())
    critic_loss = torch.pow(values - Returns,2)
    loss = actor_loss.sum() + clc*critic_loss.sum()
    loss.backward()
    worker_opt.step()
    return actor_loss, critic_loss, len(rewards)

def run_episode(worker_env, worker_model):
    """Play one CartPole episode; return per-step (values, logprobs, rewards).

    Reward shaping: +1 per surviving step, -10 on termination.
    """
    state = torch.from_numpy(worker_env.env.state).float()
    values, logprobs, rewards = [],[],[]
    done = False
    j=0
    while (done == False):
        j+=1
        #run actor critic model
        policy, value = worker_model(state)
        values.append(value)
        #sample action
        logits = policy.view(-1)
        action_dist = torch.distributions.Categorical(logits=logits)
        action = action_dist.sample()
        logprob_ = policy.view(-1)[action]
        logprobs.append(logprob_)
        state_, _, done, info = worker_env.step(action.detach().numpy())
        state = torch.from_numpy(state_).float()
        if done:
            reward = -10
            worker_env.reset()
        else:
            reward = 1.0
        rewards.append(reward)
    return values, logprobs, rewards

def worker(t, worker_model, counter, params, losses):
    """Worker process `t`: run episodes and update the shared model in place.

    counter is a shared mp.Value of completed episodes; losses is an mp.Queue
    of episode lengths. Stops after params['epochs'] episodes or ~45 s.
    """
    #q is mp Queue
    print("In process {}".format(t,))
    start_time = time.time()
    #play n steps of the game, store rewards
    worker_env = gym.make("CartPole-v1")
    worker_env.reset()
    # NOTE(review): each worker builds its own Adam over the *shared* model
    # parameters — gradients race across processes by design (Hogwild-style).
    worker_opt = optim.Adam(lr=1e-4,params=worker_model.parameters())
    worker_opt.zero_grad()
    for i in range(params['epochs']):
        worker_opt.zero_grad()
        #stores
        values, logprobs, rewards = run_episode(worker_env,worker_model)
        actor_loss,critic_loss,eplen = update_params(worker_opt,values,logprobs,rewards)
        counter.value = counter.value + 1
        losses.put(eplen)
        if i % 50 == 0:
            print("Process: {} Maxrun: {} ALoss: {} CLoss: {}".format(t,eplen, \
                actor_loss.detach().mean().numpy(),critic_loss.detach().mean().numpy()))
        if time.time() - start_time > 45:
            print("Done 45 seconds")
            break;
# -

'''v=torch.arange(start=5,end=0,step=-1) print(v[:-1],v[1:])'''

'''%%time TestModel = ActorCritic() worker_opt = optim.Adam(lr=1e-4,params=TestModel.parameters()) q2 = mp.Value('i',0) params = { 'epochs':5, 'n_steps':5, 'n_workers':1, } AC_step(0,TestModel,q2,params)'''

# # <span style="color:red;">Train</span>

# %%time
# Spawn n_workers processes that share MasterNode's memory, wait for them,
# then plot the smoothed episode lengths collected on the queue.
# (NOTE(review): block structure under `if __name__` reconstructed from a
# collapsed source — confirm against the original notebook.)
if __name__ == '__main__':
    MasterNode = ActorCritic()
    MasterNode.share_memory()
    processes = []
    params = { 'epochs':1000, 'n_workers':7, }
    counter = mp.Value('i',0)
    losses = mp.Queue()
    for i in range(params['n_workers']):
        p = mp.Process(target=worker, args=(i,MasterNode,counter,params,losses))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    for p in processes:
        p.terminate()
    print(counter.value,processes[1].exitcode)
    losses_ = []
    while not losses.empty():
        losses_.append(losses.get())
    plt.figure(figsize=(9,5))
    x = np.array(losses_)
    N = 50
    x = np.convolve(x, np.ones((N,))/N, mode='valid')  # moving average, window 50
    plt.ylabel("Mean Episode Length")
    plt.xlabel("Training Time")
    plt.title("CartPole Training Evaluation")
    plt.plot(x)
    #plt.savefig("avg_rewards.png")

# ## Test

# Evaluate the trained shared model: average episode length over 25 greedy-ish
# (sampled) rollouts.
steps = 2000
env = gym.make("CartPole-v1")
env.reset()
maxrun = 0
state = torch.from_numpy(env.env.state).float()
done = False
avg_run = 0
runs = int(25)
for i in range(runs):
    maxrun = 0
    done = False
    env.reset()
    state = torch.from_numpy(env.env.state).float()
    while(done==False):
        #env.render('human')
        policy, value = MasterNode(state)
        #sample action
        action = torch.distributions.Categorical(logits=policy.view(-1)).sample().detach().numpy()
        state_, reward, done, lives = env.step(action)
        #print(value,reward)
        state = torch.from_numpy(state_).float()
        maxrun += 1
    avg_run += maxrun
avg_run = avg_run / runs
env.close()
print("Maxrun: {}".format(avg_run,))

'''TestModel = ActorCritic() env = gym.make("CartPole-v1") env.reset() maxrun = 0 state = torch.from_numpy(env.env.state).float() done = False avg_run = 0 runs = int(200) for i in range(runs): maxrun = 0 done = False env.reset() state = torch.from_numpy(env.env.state).float() while(done==False): #env.render('human') policy, value = TestModel(state) #sample action action = torch.distributions.Categorical(logits=policy.view(-1)).sample() state_, reward, done, lives = env.step(env.action_space.sample()) state = torch.from_numpy(state_).float() maxrun += 1 avg_run += maxrun avg_run /= runs env.close() print("Maxrun: {}".format(avg_run,))'''
Chapter 5/Cartpole A3C.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt

# For Daniel
class Config():
    """Bundle of cleaning parameters: clip bounds plus target mean/std."""

    def __init__(self, lower_bound, upper_bound, normalization_mean, normalization_std_dev):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.normalization_mean = normalization_mean
        self.normalization_std_dev = normalization_std_dev

    def __repr__(self):
        # Human-readable dump of all four parameters.
        return f"""lower_bound: {self.lower_bound} upper_bound: {self.upper_bound} normalization_mean: {self.normalization_mean} normalization_std_dev: {self.normalization_std_dev}"""

class DataCleaner():
    """Hold a data array and apply clip/normalize steps driven by a Config.

    `mean` and `std_dev` are recomputed after every mutating step.
    """

    def __init__(self, data, config):
        self.data = data
        self.config = config
        self.redo_metrics()

    def redo_metrics(self):
        """Refresh the cached mean and standard deviation of the data."""
        self.mean = np.mean(self.data)
        self.std_dev = np.std(self.data)

    def run_default_pipeline(self):
        """Convenience: clip first, then normalize."""
        self.trim()
        self.normalize()

    def trim(self):
        """Clip the data into [lower_bound, upper_bound]."""
        cfg = self.config
        self.data = np.clip(self.data, cfg.lower_bound, cfg.upper_bound)
        self.redo_metrics()

    def normalize(self):
        """Standardize the data, then rescale to the configured mean/std."""
        cfg = self.config
        standardized = (self.data - self.mean) / self.std_dev
        self.data = standardized * cfg.normalization_std_dev + cfg.normalization_mean
        self.redo_metrics()

# Demo: the same raw data run through two configurations.
raw_data = [1,5,2,7,4,8,4,9,3,6,5,4]

config_1 = Config(lower_bound=3,upper_bound=7, normalization_mean=5, normalization_std_dev=1)
test_1 = DataCleaner(raw_data, config_1)
test_1.trim()
test_1.normalize()
test_1.trim()
config_1

config_2 = Config(lower_bound=1,upper_bound=9, normalization_mean=5, normalization_std_dev=1)
test_2 = DataCleaner(raw_data, config_2)
test_2.trim()
test_2.normalize()
config_2

for config in [config_1,config_2]:
    cleaner = DataCleaner(raw_data, config)
    cleaner.trim()
    cleaner.normalize()
    print("="*40)
    print(config)
    print(f"Random metric: for demonstration purposes: {cleaner.std_dev}")

plt.scatter(x=test_1.data, y=test_2.data,)
config_classes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# # Lab 1 - Python
#
#
# According to Wikipedia, [Python] is a widely used interpreted, high-level programming language used for general-purpose programming. It has a design philosophy which emphasizes code readability (notably using whitespace indentation to delimit code blocks rather than curly braces or keywords), and a syntax which allows programmers to express concepts in fewer lines of code than possible in languages such as C++ or Java.
#
# Let us first look at the characteristics of the language:
#
# - **General purpose**: not built for a particular usage, it works as well for scientific computing as for web and application development. It features high-level data structures and supports multiple paradigms: procedural, object-oriented and functional.
#
# - **Elegant syntax**: easy-to-read and intuitive code, easy-to-learn minimalistic syntax, quick to write (low boilerplate / verbosity), maintainability scales well with size of projects.
#
# - **Expressive language**: fewer lines of code, fewer bugs, easier to maintain.
#
# Some technical details:
#
# - **Dynamically typed**: no need to define the type of variables, function arguments or return types.
#
# - **Automatic memory management** (garbage collector): no need to explicitly allocate and deallocate memory for variables and data arrays. No memory leak bugs.
#
# - **Interpreted**: No need to compile the code. The Python interpreter reads and executes the python code directly. It also means that a single Python source runs anywhere a runtime is available, like on Windows, Mac, Linux and in the Cloud.
#
# A couple of advantages:
#
# - **Ease of programming**: it is very intuitive to write Python code, minimizing the time required to develop, debug and maintain the code.
#
# - **Well-designed language**: it encourages many good programming practices, such as modularity, object-oriented programmed, good system for packaging, large open-source community, integrated documentation, among many others.
#
# And some disadvantages:
#
# - **Two versions**: there are two versions of Python in general use: 2 and 3. While Python 3 is now well adopted, some libraries or legacy code still only support Python 2 leading sometimes to incompatibility. In this class, we will use Python 3.5.
#
# - **Slow execution**: due to its interpreted and dynamic nature, the execution of Python code can be slow compared to compiled statically typed programming languages, such as C and Fortran.
#
# - **No compiler**: there is no compiler to catch your errors. Solutions include unit / integration tests or the use of a [linter] such as [pyflakes], [Pylint] or [PyChecker]. [Flake8] combines static analysis with style checking.
#
# [Python]: https://en.wikipedia.org/wiki/Python_(programming_language)
# [linter]: https://en.wikipedia.org/wiki/Lint_%28software%29
# [pyflakes]: https://pypi.python.org/pypi/pyflakes
# [Pylint]: https://www.pylint.org
# [PyChecker]: http://pychecker.sourceforge.net
# [Flake8]: https://pypi.python.org/pypi/flake8
#
# Finally, a general tip: Python is really easy to read, and libraries always provide in-code as well as on-line documentation that you should read. "[*Read the doc*](https://docs.python.org/3.5/)" should be your motto when coding in Python.
#
# Now, let us start playing around!

# ## Indentation
#
# The very first thing to know is that Python is an **indent-based** language meaning that blocks are delimited by the indentation of the code (and not by the curly brackets `{}`, as it is the case in Java or C++ for instance).
#
# You can run the following cell by focusing it (e.g., click on the cell) and doing `CTRL-Enter` (to leave the focus on the cell) or `SHIFT-Enter` (to focus the next cell). You should get an error. Try to fix it!

a = 1
b = 2
a + b

# ## Variables
#
# Variables are easily set. No need to specify any type. For instance, fill the following cell with your name and your sciper number:

my_name = "Lejal"
my_sciper = 250692

# ## Weak Typing
#
# Python is a weakly typed programming language. If you are used to Java, you know that you have to specify the type of your variables. In Python, you do not need to do that, and types can be mixed. For instance here we can store fruits (`str`), numbers (`float` and `int`) and students in a `list`:
#

fruits_and_numbers = ["Apple", 3.141593, "Cherry", 4]

# You can easily `append` elements to the list. Append your sciper to the list.

fruits_and_numbers.append(my_name)
fruits_and_numbers.append(my_sciper)
fruits_and_numbers

# You can learn more about [built-in types](https://docs.python.org/3.5/library/stdtypes.html).

# ## For
#
# You can go through a `list` using `for` loops. Remember that they behave like `foreach` by default. In this cell, we use the `print` [built-in function](https://docs.python.org/3.5/library/functions.html).

for item in fruits_and_numbers:
    print(item)

# ## If
#
# An `if` statement is also pretty straight forward. The `elif` and `else` statements are optional.

for item in fruits_and_numbers:
    if type(item) is int and item == my_sciper:
        print(item, "is your sciper number")
    elif type(item) is int:
        print(item, "is an integer")
    elif type(item) is float:
        print(item, "is a float")
    else:
        print(item, "might be a string")

# ### Functions
#
# As many modern language, Python allows to define functions that can be called later in your code. A function is denoted by the `def` keyword. It can take *positional* (or *required*) arguments as well as *keyword* (or *optional*) arguments. Keyword arguments are optional and a default value is given in the function definition.
def shout(sentence, louder=False): if not louder: print(sentence.upper()) else: print(sentence.upper() + "!!!") # The following three lines are equivalent: shout("you shall not pass") shout("fly you fools", louder=False) shout(sentence="my precious", louder=False) # Try to shout your name `louder`. shout(my_name, louder=True) # ## Import # # [Modules](https://docs.python.org/3/tutorial/modules.html) in Python are files where variables, functions and classes are defined. They can then be imported in other modules (or scripts). Python comes with a [standard library](https://docs.python.org/3.5/library/) containing a lot of useful modules that you are likely to make use of. For instance, we use here the `listdir()` function from the [operating system](https://docs.python.org/3.5/library/os.html) library. # + import os os.listdir() # - # You can also define your own modules, and import them in other files or notebooks. This is very useful if you want to modularize your code for clarity, efficiency, readability and bug-proof code. For instance here, we have a file named `internet_analytics.py` in the `notebooks/modules` folder (check it!) which contains some functions (`describe()` in this case). # + from modules import internet_analytics internet_analytics.describe() # - # You can import the whole module or just part of it, such as one function of interest. # + from modules.internet_analytics import solution_to_final_exam solution_to_final_exam() # - # ## Going Further # # You now have a brief overview of Python. For a more complete introduction, you should take a look at [the Python tutorial](https://docs.python.org/3/tutorial/index.html). 
Here is an overview of this document: # # * Skim through Sections 1-3 to get an idea of the Python syntax if you never used it # * Pay a little more attention to Section 4, especially # * Section 4.2 on `for` loops, as they behave like `foreach` by default, which may be disturbing if you are more acoutumed to coding in lower level languages. # * Section 4.7 on functions, default argument values and named arguments, as they are a real pleasure to use (compared to traditional, order-based arguments) once you are used to it. # * Section 5 on Data Structures, especially how to use `list`s, `dictionary`s and `tuple`s if you have not used a language with those concepts before. # * You can keep Sections 6-9 on `module`s, `IO`, `Exception`s and `Object`s for later - when you know you will be needing it. # * Section 10 on the standard library and [the standard library index](https://docs.python.org/3/library/index.html) are worth a quick scroll to see what's available. # * Do not bother with Sections 11-16 for now. # # Some additional ressources can be found here: # # * Some exercises: http://www.learnpython.org/ # * More exercises: https://learnpythonthehardway.org/book/index.html # * A Python tour of Data Science (a source of inspiration for this lab): https://github.com/mdeff/python_tour_of_data_science
ix-1-lab-intro/1-python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Parses GTF files # %load_ext autoreload # %autoreload 2 # %matplotlib inline # + import glob import sys sys.path.append('../linker') from GTF import lines, dataframe, parse # - gene_names = {} for filename in sorted(glob.glob("/Users/joewandy/Downloads/gtf_to_parse/*.gtf.gz")): collision = 0 try: gtf_dict = lines(filename) for d in gtf_dict: try: gene_id = d['gene_id'] gene_name = d['gene_name'] gene_names[gene_id] = gene_name except KeyError: continue except IndexError: continue finally: print(filename, len(gene_names)) import pickle pickle.dump(gene_names, open('../static/data/gene_names.p', 'wb'))
graphomics/notebooks/mapping/parse_gtf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.10 64-bit (''base'': conda)' # language: python # name: python3 # --- # + import math import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.style.use('default') from scipy import signal # + fpath_data = '../2021-12-04/labelled_from_OpenBCI/S03-trial4-2021-12-04-OpenBCI.csv' channel_names = ['EXG Channel 0', 'EXG Channel 1', 'EXG Channel 2', 'EXG Channel 3', 'EXG Channel 4', 'EXG Channel 5'] fs = 250 # + df_data = pd.read_csv(fpath_data) df_data['Group'] = (df_data['Direction'] != df_data['Direction'].shift()).cumsum() df_data['Trial'] = (df_data['Group']-1) // 3 df_data # + def notch_filter(freq=60.0, fs=250, Q=60): return signal.iirnotch(freq, freq / Q, fs=fs) def butter_filter(low=5.0, high=50.0, order=4, fs=250): nyq = fs / 2 return signal.butter(order, [low / nyq, high / nyq], btype='bandpass') def cheby_filter(low=5.0, high=50.0, order=1, fs=250, rp=1): nyq = fs / 2 return signal.cheby1(order, rp, [low / nyq, high / nyq], btype='bandpass') def filter_signal(data, notch=True, bandpass_fn=None, filter_type='offline', notch_freq=60.0, notch_Q=60, low_freq=5.0, high_freq=50, fs=250, notch_zi=None, bp_zi=None, realtime_shift=1): def filter_signal_realtime(b, a, data, zi=None): if zi is None: zi = signal.lfilter_zi(b, a) # construct initial conditions data_filtered = np.zeros_like(data) zi_tmp = zi for i_sample, sample in enumerate(data): sample_filtered, zi_tmp = signal.lfilter(b, a, [sample], zi=zi_tmp) data_filtered[i_sample] = sample_filtered[0] # realtime_shift is the proportion by which the next data window will be shifted # e.g. 
if realtime_shift is 1, then there is no overlap between data windows # if realtime_shift is 0.5, then the next window will have 50% overlap with the current one if i_sample == (realtime_shift * len(data)) - 1: zi = zi_tmp return data_filtered, zi # returns new initial conditions notch_b, notch_a = notch_filter(freq=notch_freq, Q=notch_Q, fs=fs) bandpass = False if bandpass_fn is not None: bp_b, bp_a = bandpass_fn(low=low_freq, high=high_freq, fs=fs) bandpass = True if filter_type == 'offline': if notch: data = filtering_fn(notch_b, notch_a, data) if bandpass: data = filtering_fn(bp_b, bp_a, data) return data elif filter_type == 'realtime': if filtering_fn != signal.lfilter: raise ValueError('Can only use scipy.signal.lfilter() for real-time filtering') if notch: data, notch_zi = filter_signal_realtime(notch_b, notch_a, data, zi=notch_zi) if bandpass: data, bp_zi = filter_signal_realtime(bp_b, bp_a, data, zi=bp_zi) return data, notch_zi, bp_zi else: raise ValueError(f'Filter type "{filter_type}" is invalid') # + filter_names = ['no', 'notch', 'butterworth', 'chebyshev', 'notch+butterworth', 'notch+chebyshev'] # filter_names = ['no', 'notch', 'realtime notch', 'notch+butterworth', 'realtime notch+butterworth', 'notch+chebyshev', 'realtime notch+chebyshev'] filter_functions = { 'no': (lambda data: filter_signal(data, notch=False, bandpass_fn=None)), 'notch': (lambda data: filter_signal(data, notch=True, bandpass_fn=None, filter_type='offline')), 'butterworth': (lambda data: filter_signal(data, notch=False, bandpass_fn=butter_filter, filter_type='offline')), 'chebyshev': (lambda data: filter_signal(data, notch=False, bandpass_fn=cheby_filter, filter_type='offline')), 'notch+butterworth': (lambda data: filter_signal(data, notch=True, bandpass_fn=butter_filter, filter_type='offline')), 'notch+chebyshev': (lambda data: filter_signal(data, notch=True, bandpass_fn=cheby_filter, filter_type='offline')), 'realtime notch': (lambda data, notch_zi, bp_zi: filter_signal(data, 
notch=True, bandpass_fn=None, filter_type='realtime', notch_zi=notch_zi, bp_zi=bp_zi)), 'realtime butterworth': (lambda data, notch_zi, bp_zi: filter_signal(data, notch=False, bandpass_fn=butter_filter, filter_type='realtime', notch_zi=notch_zi, bp_zi=bp_zi)), 'realtime chebyshev': (lambda data, notch_zi, bp_zi: filter_signal(data, notch=False, bandpass_fn=cheby_filter, filter_type='realtime', notch_zi=notch_zi, bp_zi=bp_zi)), 'realtime notch+butterworth': (lambda data, notch_zi, bp_zi: filter_signal(data, notch=True, bandpass_fn=butter_filter, filter_type='realtime', notch_zi=notch_zi, bp_zi=bp_zi)), 'realtime notch+chebyshev': (lambda data, notch_zi, bp_zi: filter_signal(data, notch=True, bandpass_fn=cheby_filter, filter_type='realtime', notch_zi=notch_zi, bp_zi=bp_zi)), } i_channel_to_plot = 0 channel_name = channel_names[i_channel_to_plot] remove_dc_offset = True block_length = 0.5 # for DC offset removal, in seconds i_trial = 10 window_length = 2 # in seconds # time_start = 8 # in seconds # window_length = 2 # in seconds # n_windows = 4 # i_time_start = fs*time_start # i_time_stop = i_time_start + (fs * window_length * n_windows) trial_data = df_data.loc[df_data['Trial'] == i_trial, channel_name] i_time_start = trial_data.index[0] i_time_stop = trial_data.index[-1] n_windows = (i_time_stop - i_time_start) / (fs * window_length) # remove DC offset if remove_dc_offset: n_blocks = math.ceil(len(trial_data)/(block_length*fs)) for block in np.array_split(trial_data, n_blocks): trial_data.loc[block.index] = block - np.mean(block) n_rows = len(filter_names) n_cols = 1 fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(12, n_rows*2)) for filter_name, ax in zip(filter_names, axes): notch_zi = None bp_zi = None if 'realtime' in filter_name: trial_data_filtered, _, _ = filter_functions[filter_name](trial_data, None, None) else: trial_data_filtered = filter_functions[filter_name](trial_data) ax.plot(trial_data.index / fs, trial_data_filtered, color='black', 
linestyle='--', alpha=0.7) for window in np.array_split(trial_data, n_windows): if 'realtime' in filter_name: window_filtered, notch_zi, bp_zi = filter_functions[filter_name](window, notch_zi, bp_zi) else: window_filtered = filter_functions[filter_name](window) ax.plot((window.index / fs), window_filtered, alpha=1) ax.set_xlim(i_time_start / fs, i_time_stop / fs) ax.set_title(f'{channel_name} ({filter_name} filter)') ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude') fig.tight_layout() prefix = 'filtfilt' fig.savefig(f'{prefix}_{"without" if remove_dc_offset else "with"}_dc_offset.png', dpi=300, bbox_inches='tight')
data/visualization/compare_filters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ironhack-3.7 # language: python # name: ironhack-3.7 # --- # # Mandatory Challenge # ## Context # You work in the data analysis team of a very important company. On Monday, the company shares some good news with you: you just got hired by a major retail company! So, let's get prepared for a huge amount of work! # # Then you get to work with your team and define the following tasks to perform: # 1. You need to start your analysis using data from the past. # 2. You need to define a process that takes your daily data as an input and integrates it. # # You are in charge of the second part, so you are provided with a sample file that you will have to read daily. To complete you task, you need the following aggregates: # * One aggregate per store that adds up the rest of the values. # * One aggregate per item that adds up the rest of the values. # # You can import the dataset `retail_sales` from Ironhack's database. # # ## Your task # Therefore, your process will consist of the following steps: # 1. Read the sample file that a daily process will save in your folder. # 2. Clean up the data. # 3. Create the aggregates. # 4. Write three tables in your local database: # - A table for the cleaned data. # - A table for the aggregate per store. # - A table for the aggregate per item. # # ## Instructions # * Read the csv you can find in Ironhack's database. # * Clean the data and create the aggregates as you consider. # * Create the tables in your local database. # * Populate them with your process. # + # your code here
module-1/Dataframe-Calculations/your-code/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt dates = pd.date_range('1992-01-01', '2012-10-22') np.random.seed(1) pnls = np.random.randint(-990, 1000, size=len(dates)) # slight positive bias pnls = pnls.cumsum() positions = np.random.randint(-1, 2, size=len(dates)) positions = positions.cumsum() strategy_performance = pd.DataFrame(index=dates, data={'PnL': pnls, 'Position': positions}) strategy_performance strategy_performance['PnL'].plot(figsize=(12,6), color='black', legend='PnL') strategy_performance['PnLStdev'] = strategy_performance['PnL'].rolling(20).std().fillna(method='backfill') strategy_performance['PnLStdev'].plot(figsize=(12,6), color='black', legend='PnLStdev') daily_pnl_series = strategy_performance['PnL'].shift(-1) - strategy_performance['PnL'] daily_pnl_series.fillna(0, inplace=True) avg_daily_pnl = daily_pnl_series.mean() std_daily_pnl = daily_pnl_series.std() sharpe_ratio = avg_daily_pnl/std_daily_pnl sharpe_ratio annualized_sharpe_ratio = sharpe_ratio * np.sqrt(252) annualized_sharpe_ratio strategy_performance['PnL'].plot(figsize=(12,6), color='black', legend='PnL') plt.axhline(y=28000, color='darkgrey', linestyle='--', label='PeakPnLBeforeDrawdown') plt.axhline(y=-15000, color='darkgrey', linestyle=':', label='TroughPnLAfterDrawdown') plt.vlines(x='2000', ymin=-15000, ymax=28000, label='MaxDrawdown', color='black', linestyle='-.') plt.legend()
ch01/risk_management.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import sys import importlib import seaborn as sns sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc') import my_plot importlib.reload(my_plot) from my_plot import MyPlotData, my_box_plot def to_ng_coord(coord): return ( int(coord[0]/4), int(coord[1]/4), int(coord[2]/40), ) import compress_pickle # input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114_restricted_z.gz') input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114.gz') grcs = [k for k in input_graph.grcs.keys()] rosette_loc_size = {} for mf_id, mf in input_graph.mfs.items(): mf.get_all_mf_locs_size(rosette_loc_size) # z_min = 15 # z_max = 35 z_min = 20 z_max = 30 mpd = MyPlotData() averages = [] for rosette_loc, size in rosette_loc_size.items(): x, y, z = rosette_loc if x < 360000 or x > 520000: continue if z < z_min*1000 or z > z_max*1000: continue averages.append(size) mpd.add_data_point( rosette_loc=rosette_loc, size=size, type='Rosette' ) import statistics mean = statistics.mean(averages) median = statistics.median(averages) stdev = statistics.stdev(averages, mean) print(f'Mean: {mean}, median: {median}, std: {stdev}') print(f'Average: {sum(averages)/len(averages)}') print(f'n = {len(averages)}') # script_n = os.path.basename(__file__).split('.')[0] script_n = 'rosette_size_figure_210228_smaller_roi' # + importlib.reload(my_plot); my_plot.my_displot( mpd, x='size', kind='hist', context='paper', kde=True, stat='density', height=4, aspect=2, x_axis_label='# of GrC partners per MF', y_axis_label='Normalized Frequency', 
save_filename=f'{script_n}_{z_min}_{z_max}_hist.svg', show=True, ) importlib.reload(my_plot); my_plot.my_displot( mpd, x='size', kind='kde', context='paper', # kde=True, height=4, aspect=2, x_axis_label='# of GrC partners per MF', y_axis_label='Normalized Frequency', save_filename=f'{script_n}_{z_min}_{z_max}_kde.svg', show=True, ) importlib.reload(my_plot); my_plot.my_displot( mpd, x='size', kind='ecdf', context='paper', # ecdf=True, height=4, aspect=2, y_axis_label='Cumulative Distribution', x_axis_label='# of GrC partners per MF', save_filename=f'{script_n}_{z_min}_{z_max}_ecdf.svg', show=True, ) # + importlib.reload(my_plot); my_plot.my_catplot( mpd, # x="size", y="size", # hue="type", # hue_order=['All', 'Per PC', 'Per pf'], # hue_order=['All', 'Per PC'], ylim=[None, 50], context='paper', kind='violin', cut=0, # palette=sns.color_palette("mako_r", as_cmap=True), # palette=sns.color_palette("rocket"), color='coral', # font_scale=1.5, height=4, # width=2, aspect=.75, y_axis_label='# of GrC partners per MF', # x_axis_label='# of GrCs per MF', save_filename=f'{script_n}_{z_min}_{z_max}_violin.svg', show=True, ) # + import compress_pickle # input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114_restricted_z.gz') input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114.gz') grcs = [k for k in input_graph.grcs.keys()] for mf_id, mf in input_graph.mfs.items(): for rosette_loc in mf.claws_gt: x, y, z = rosette_loc if x < 400000 or x > 480000: continue if z < z_min*1000 or z > z_max*1000: continue print(rosette_loc) print(mf_id) for grc_id in mf.claws[rosette_loc]: print(grc_id, end=', ') # print(mf.claws[rosette_loc]) print('\n\n') # rosette_loc_size # # z_min = 15 # # z_max = 35 # z_min = 20 # z_max = 30 # mpd = MyPlotData() # averages = [] # for rosette_loc, size in rosette_loc_size.items(): # x, 
y, z = rosette_loc # if x < 360000 or x > 520000: # continue # if z < z_min*1000 or z > z_max*1000: # continue # averages.append(size) # mpd.add_data_point( # rosette_loc=rosette_loc, # size=size, # type='Rosette' # )
analysis/mf_grc_analysis/rosette_size/rosette_size_figure_210228_smaller_roi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="64OqwtNarnIx" # ### Pandas # - 데이터 분석을 위한 사용이 쉽고 성능이 좋은 오픈소스 python 라이브러리 # - R과 Pandas의 특징 # - R보다 Pandas가 학습이 쉽습니다. # - R보다 Pandas가 성능이 좋습니다. # - R보다 Python은 활용할수 있는 분야가 많습니다. # - 크게 두가지 데이터 타입을 사용합니다. # - Series : index와 value로 이루어진 데이터 타입 # - DataFrame : index, column, value로 이루어진 데이터 타입 # + colab={} colab_type="code" id="uaWfdFG9rnIz" import numpy as np import pandas as pd # + [markdown] colab_type="text" id="6V4UXFv_rnI1" # ### 1. Series # - 동일한 데이터 타입의 값을 갖습니다. # + colab={} colab_type="code" id="KkgJpRTSrnI2" outputId="366d0ca8-002b-41ca-af6a-87b44ee91f05" # Series : value 만 설정하면 index는 0부터 자동으로 설정 됩니다. data = pd.Series(np.random.randint(10, size=5)) data # + colab={} colab_type="code" id="zzvqk-CwrnI4" outputId="d39c5473-4750-4969-af99-6dc2223bd93d" # index 설정 data = pd.Series(np.random.randint(10, size=5), index=list("ABCDE")) data # + colab={} colab_type="code" id="qYdW6ZDNrnI6" outputId="c1dfa810-0f64-4124-cf04-b5d702e0e560" data.index, data.values # + colab={} colab_type="code" id="ludUTtS5rnI8" outputId="558426e3-c3b8-4080-bb18-6ca6fa7fc7d9" data["B"], data.B # + colab={} colab_type="code" id="n31WxXDNrnI-" outputId="6ade0737-4224-43cd-9b9f-6cbae4016160" data["C"] = 10 data # + colab={} colab_type="code" id="mFzq3uWHrnI_" outputId="4f34291c-8fc7-43e7-8b81-c86b37b4a05b" # 브로드 캐스팅 data * 10 # + colab={} colab_type="code" id="et6TZoT0rnJB" outputId="76a50535-3535-4f5e-fea3-e8e09a001e7e" data[["B","E"]] # + colab={} colab_type="code" id="dhtvBfhPrnJD" outputId="9f1cdb8b-86d0-4605-cf7b-ffd3ffa588c5" # offset index data[2::2] # + colab={} colab_type="code" id="oP0GEJS6rnJE" outputId="89f86aec-0ebc-41a5-8b90-b89377dfd690" data[::-1] # + [markdown] colab_type="text" id="kWyglP3ernJH" # Series 연산 # + 
colab={} colab_type="code" id="RQoumg4OrnJI" outputId="24e59d8f-79cf-40f6-ed6f-34ae4d3e2269" data # + colab={} colab_type="code" id="hKHPvscrrnJJ" outputId="1a97409c-ae12-482f-c8e9-93e28b7a2529" data2 = pd.Series({"D":3, "E":5, "F":7}) data2 # + colab={} colab_type="code" id="Iy5ewXk0rnJL" outputId="9562e69c-24e3-4609-946e-5f09e1853fff" result = data + data2 result # None # + colab={} colab_type="code" id="nkQwohBgrnJN" outputId="537f3ed3-2ef4-4290-99cb-9274852d5bb0" result[result.isnull()] = data result # + colab={} colab_type="code" id="mrLtU3X_rnJO" outputId="2d5bfe06-1d42-4732-c394-b93441b9e68e" result[result.isnull()] = data2 result # + [markdown] colab_type="text" id="90ULBRJVrnJQ" # ### 2. DataFrame # - 데이터 프레임은 여러개의 Series로 구성 # - 같은 컬럼에 있는 value값은 같은 데이터 타입을 갖습니다. # + colab={} colab_type="code" id="9gWQJUzGrnJR" # 데이터 프레임 생성 1 : 딕셔너리의 리스트 # 열 단위로 list type으로 저장 # + colab={} colab_type="code" id="EXgerTe3rnJS" outputId="b25ba860-b454-47ee-c803-9fe46d0c9473" datas = { "name":["dss", "fcamp"], "email":["<EMAIL>", "<EMAIL>"], } datas # + colab={} colab_type="code" id="BjYdY7jTrnJU" outputId="4ec1229e-1eb2-4a5d-d987-80e541395ee7" df = pd.DataFrame(datas) df # + colab={} colab_type="code" id="-QX57vBtrnJV" # 데이터 프레임 생성 2 : 리스트의 딕셔너리 # 행 단위로 -> 채워줌 # + colab={} colab_type="code" id="rzbjMwAMrnJY" outputId="23dacfda-33b5-4a31-c41d-7b748fa081bc" datas = [ {"name":"dss", "email":"<EMAIL>"}, {"name":"fcamp", "email":"<EMAIL>"}, ] datas # + colab={} colab_type="code" id="BX9I9AlhrnJb" outputId="e2435993-64c1-4ef6-b153-b1fb7c374a5a" df = pd.DataFrame(datas) df # + colab={} colab_type="code" id="RLsz2COBrnJe" outputId="348abfd5-6b97-4c26-9474-d48d125c9724" # 인덱스를 추가하는 방법 df = pd.DataFrame(datas, index=["one", "two"]) df # + colab={} colab_type="code" id="5Ev4SfU7rnJf" outputId="606c20ec-42e1-4deb-b43b-82752091e203" df.index # + colab={} colab_type="code" id="BQbbO4yBrnJh" outputId="ca89c31b-61f4-4e27-f760-248f80720422" df.columns # + colab={} colab_type="code" 
id="VIlmCtX0rnJj" outputId="62450ee6-c976-42be-d3a4-14599b42e1c3" df.values # + colab={} colab_type="code" id="-qoYMrCdrnJk" # 데이터 프레임에서 데이터의 선택 : row, colunm, (row,column) # + colab={} colab_type="code" id="cFXrbTxDrnJn" outputId="fc98a118-49be-4a89-b2d6-653e89ab011a" # row 선택 : loc df = pd.DataFrame(datas) df # + colab={} colab_type="code" id="tggNibqsrnJo" outputId="f309011f-962e-4aa4-ffb4-5111b466aee4" df.loc[1]["email"] # + colab={} colab_type="code" id="Cgs3mQzxrnJp" outputId="78e6060b-0b45-4e95-f258-d57e1bb199a9" # index가 있으면 수정, 없으면 추가 df.loc[2] = {"name":"andy", "email":"<EMAIL>"} df # + colab={} colab_type="code" id="4fZezYqgrnJr" # column 선택 # + colab={} colab_type="code" id="Hh0CvVLYrnJs" outputId="aefbc166-1db6-4f4d-ec3d-6257a1a00b35" df # + colab={} colab_type="code" id="tPog8-jtrnJu" outputId="e74e98ea-8e25-4684-be98-ed67333b8caf" df["name"] # + colab={} colab_type="code" id="0WWHkjWRrnJw" outputId="610ad8f9-ed4a-4623-a079-ea269fbfa47b" df["id"] = "" df # + colab={} colab_type="code" id="baEdbV_ernJx" outputId="990ecd4c-98ad-440e-f4c0-4a784f4b10a5" df["id"] = range(1, 4) # np.arange(1, 4) df # + colab={} colab_type="code" id="JdzKRQSErnJy" outputId="f675721b-b356-4cf2-f78e-0c5a6ccace2d" df.dtypes # + colab={} colab_type="code" id="tLsvB7CbrnJ0" # row, column 선택 # + colab={} colab_type="code" id="NAArh62VrnJ1" outputId="c2b61129-3785-43e1-915d-ec26912c1f3f" df.loc[[0, 2], ["email", "id"]] # + colab={} colab_type="code" id="pFHyqoN-rnJ2" # 컬럼 데이터 순서 설정 # + colab={} colab_type="code" id="UpTvYFm5rnJ4" outputId="b5bc50d4-2479-4258-cb37-200d8963a8fe" df[["id", "name", "email"]] # + colab={} colab_type="code" id="WDhH7qpDrnJ7" # head, tail # + colab={} colab_type="code" id="-2gRexkPrnJ8" outputId="e0744b2c-58d8-4c01-c4cb-f5a622aeabe1" df.head(2) # + colab={} colab_type="code" id="cxaPe-aernJ9" outputId="78a51908-55bb-45a6-89fe-684f25a58299" df.tail(2) # + [markdown] colab_type="text" id="YzgbhHv2rnJ_" # ### 3. 
apply 함수 # - map 함수와 비슷 # + colab={} colab_type="code" id="BvSRhfMmrnJ_" outputId="e26e4ca5-db80-4e24-c2ec-91144a8d2252" # email 컬럼에서 메일의 도메인만 가져와서 새로운 domain 컬럼을 생성 df # + colab={} colab_type="code" id="XJh7eOwRrnKB" outputId="7b903926-349d-41dc-e730-f01614120062" def domain(email): return email.split("@")[1].split(".")[0] domain(df.loc[0]["email"]) # + colab={} colab_type="code" id="omtFE4IRrnKE" outputId="43c2ec11-0531-45a6-9f7e-d462157aa687" df["domain"] = df["email"].apply(domain) df # + colab={} colab_type="code" id="ncKTQtm3rnKF" outputId="d15ae949-9595-4aea-cb47-e322f7a2992b" df["domain"] = df["email"].apply(lambda email: email.split("@")[1].split(".")[0]) df # + colab={} colab_type="code" id="ytrif692rnKJ" from makedata import * # + colab={} colab_type="code" id="ISHhp2fNrnKK" outputId="08ba866d-09b5-4393-9d6e-f41ab760ab9a" get_name() # + colab={} colab_type="code" id="I6Y3Ca6lrnKM" outputId="c6461fbe-13aa-4684-82a8-a41ad049c969" get_age() # + colab={} colab_type="code" id="BVQVJpE5rnKN" outputId="98fb71ef-fd53-4152-96f2-b0e9a0db9fa1" df1 = pd.DataFrame(make_data(5)) df2 = pd.DataFrame(make_data(5)) df2 # + [markdown] colab_type="text" id="biz9O4ournKO" # ### 4. append # + colab={} colab_type="code" id="etpLJYFjrnKO" outputId="6ee93bfb-5763-44bf-ad6d-640968062a7b" # append 데이터 프레임 합치기 df3 = df1.append(df2) df3[2:7] # + colab={} colab_type="code" id="wDE-aWmjrnKP" outputId="aee2768e-7df9-4fea-bc2d-d3f37d6c1201" # reset_index 인덱스 재정렬 df3.reset_index(drop=True, inplace=True) df3.tail(2) # + colab={} colab_type="code" id="Pm07wCN-rnKQ" outputId="5f1d0c04-01b3-4fb3-8d01-e63967c1111b" df3 = df1.append(df2, ignore_index=True) df3.tail(2) # + [markdown] colab_type="text" id="Hbxrn2HmrnKR" # ### 5. 
concat # - row나 column으로 데이터 프레임을 합칠때 사용 # + colab={} colab_type="code" id="ib8Yp1K6rnKS" outputId="5c5a3a84-9176-4ff6-fbeb-a155883cacd7" df3 = pd.concat([df1, df2]).reset_index(drop=True) df3.tail(2) # + colab={} colab_type="code" id="gz-ogMfMrnKT" outputId="3cd9c157-436c-4457-dc23-7ba62166411c" pd.concat([df3, df1], axis=1) # + colab={} colab_type="code" id="Jvt1LFMornKV" outputId="d629de51-daa7-4d6b-a053-75f8599b2336" pd.concat([df3, df1], axis=1, join="inner") # + [markdown] colab_type="text" id="6qRbnrRirnKW" # ### 6. group by # - 특정 컬럼의 중복되는 데이터를 합쳐서 새로운 데이터 프레임을 만드는 방법 # + colab={} colab_type="code" id="6TkQdw66rnKX" outputId="8b6d8c39-aba1-43d6-ba43-f4d23c92170e" df = pd.DataFrame(make_data()) df # + colab={} colab_type="code" id="L8J8bFggrnKY" outputId="500a586b-b5e0-4825-9f23-192c57087bd1" # size result_df = df.groupby("Name").size().reset_index(name="count") result_df # + colab={} colab_type="code" id="kHZEOrtzrnKZ" # sort_values : 설정한 컬럼으로 데이터 프레임을 정렬 # + colab={} colab_type="code" id="PD-PqgAOrnKc" outputId="a2e252cf-3f62-4073-b10a-d4f85fbc5745" result_df.sort_values(["count", "Name"], ascending=False, inplace=True) result_df.reset_index(drop=True, inplace=True) result_df # + colab={} colab_type="code" id="hKy08aN1rnKf" # agg() # size(), min(), max(), mean() # + colab={} colab_type="code" id="tmUIs1KMrnKg" outputId="2916011d-cbbe-4150-9703-e8a2a73b98c4" df.groupby("Name").agg("mean").reset_index() # + colab={} colab_type="code" id="TArNq9ofrnKh" outputId="8ae34c84-a20f-4a6c-80ba-a7724724cec9" # 데이터를 요약해서 보여주는 함수 df.describe() # + [markdown] colab_type="text" id="d5E-n7AXrnKi" # ### 7. Merge = sql(join) # - 두개 이상의 데이터 프레임을 합쳐서 결과를 출력하는 방법
1.Study/2. with computer/4.Programming/2.Python/3. Study/02_Numpy_Pandas/03_pandas_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 4. Archivos Json y API's

# ## Javascript Object Notation (JSON)

# - Es una notación común para data web
# - No tabular:
#     - Registros no tienen todos el mismo conjunto de atributos
#     - Datos se encuentran organizados en colecciones de objetos
#     - Objetos son colecciones de atributos clave : valor
# - Json nested: objetos se encuentran anidados

# #### Tipos de Archivo Json

# - **Record Orientation**: JSON más común
#
# <img src='./img/json_record_orientation.png' width="600" height="1000">

# - **Column Orientation**: Uso de espacio más eficiente que en record Orientation
#
# <img src='./img/json_column_orientation.png' width="300" height="500">

# - **Specifying Orientation**: Divide orientation data
#
# <img src='./img/json_specify_orientation.png' width="300" height="500">

# #### Lectura de Archivo Json con Pandas

# +
import pandas as pd

death_causes = pd.read_json("./datasets/example.json",
                            orient="records")
death_causes.head(2)
# -

# ## Introducción al uso de API's

# - Define como una aplicación se comunica con otros programas
# - Es una forma de obtener data sin conocer detalles de la base de datos
#
# <img src='./img/api.png' width="500" height="700">

# Para poder obtener estos datos una forma general de hacerlo es usando la libreria <code>requests</code>

# #### Request
#
# - Envía y obtiene datos de una página web
# - No se encuentra vinculado a un api en particular
# - <code>requests.get(url_string)</code> para obtener data de una url en particular


def read_file(filename):
    """Return the lines of *filename*, stripped of surrounding whitespace.

    Bug fix: the original returned ``f.readlines()`` verbatim, so every line
    kept its trailing newline. An API key read that way corrupts the request
    URL (``...&APPID=key`` followed by a newline) and the ``Authorization``
    header below. Line indexing is unchanged: callers still take ``[0]``,
    ``[1]``, etc.
    """
    with open(filename) as f:
        return [line.strip() for line in f]


# #### Open Weathermap

API_KEY_WEATHER = read_file('weather_api.txt')[0]

# +
import requests

# api gratuita: https://home.openweathermap.org/
# documentacion: https://openweathermap.org/api
response = requests.get("http://api.openweathermap.org/data/2.5/weather?q=London,uk&APPID={}".format(API_KEY_WEATHER))
# weather_api_data=json.loads(weather_json.text)
# -

response

data = response.json()
data

import json

type(data)

# #### Parámetros <code>requests.get(url_string)</code>

# Este método presenta algunos parámetros admisibles como:
#
# - Keywords arguments:
#     - **params**: permite pasar un diccionario de la llave y el valor del API
#     - **headers**: similar al anterior pero este método debe ser usado para autenticarse con el API.
# - La respuesta obtenida será un objeto <code>response</code> el cual contendrá data y metadata.
# - <code>response.json()</code> nos brindará solo la data

# ### Ejemplo

# Yelp es una empresa pública estadounidense con sede en San Francisco, California. La compañía desarrolla, aloja y comercializa el sitio web de Yelp.com y la aplicación móvil de Yelp, que publican reseñas de empresas de fuentes públicas. También opera un servicio de reservas en línea llamado Reservas de Yelp

# +
# obtener API access: https://rapidapi.com/blog/yelp-fusion-api-profile-pull-local-business-data/?utm_source=google&utm_medium=cpc&utm_campaign=DSA&gclid=Cj0KCQjwlvT8BRDeARIsAACRFiWVp-3OcJVj9IGyxHIeZwa6Abn5VqM55qhjnMyONKZ5-btXIu8a9VQaAtaZEALw_wcB
# -

# NOTE(review): this takes the SECOND line of yelp_api.txt (index 1), while the
# weather key above takes the first line -- confirm the key-file layout.
API_KEY_YELP = read_file('yelp_api.txt')[1]

# +
# documentacion: https://www.yelp.com/developers/documentation/v3/business_search
import requests

api_url = "https://api.yelp.com/v3/businesses/search"

# Colocando parámetros según documentación del sitio
params = {"term": "bookstore",         # termino a buscar
          "location": "San Francisco"  # locacion
          }

headers = {"Authorization": "Bearer {}".format(API_KEY_YELP)}

# +
response = requests.get(api_url, headers=headers, params=params)

# Extraigo JSON data de response
data = response.json()
# -

data.keys()

# Cargando data a dataframe
df_yelp = pd.DataFrame(data["businesses"])
df_yelp.head(2)

# Visualizando tipo de datos de la respuesta
print(df_yelp.dtypes)

# ## Trabajando con Nested JSON

# - JSON contiene atributos clave - valor
# - JSON es nested es cuando el valor es un objeto
#
# <img src='./img/json_nested.png' width="500" height="700">

print(df_yelp[["categories","coordinates","location"]].head(3))

# #### pandas.io.json
#
# - Contiene un sub modulo el cual nos permite leer y escribir json nested

import pandas as pd
import requests
#from pandas.io.json import json_normalize # en desuso según documentación actual

# Set up headers, parameters, and API endpoint
api_url = "https://api.yelp.com/v3/businesses/search"
headers = {"Authorization": "Bearer {}".format(API_KEY_YELP)}
params = {"term": "bookstore",
          "location": "San Francisco"}

# Make the API call and extract the JSON data
response = requests.get(api_url, headers=headers, params=params)
data = response.json()

# Flatten data and load to data frame, with _ separators
bookstores = pd.json_normalize(data["businesses"], sep="_")
print(list(bookstores))

bookstores.head(1)

print(bookstores.categories.head())

df = pd.json_normalize(data["businesses"],
                       sep="_",
                       record_path=["categories"],
                       meta=["name",
                             "alias",
                             "rating",
                             ["coordinates","latitude"],
                             ["coordinates","longitude"]],
                       meta_prefix="biz_")
df.head(1)

# ## Recursos Adicionales

# - [working with json pandas](https://kanoki.org/2019/12/12/how-to-work-with-json-in-pandas/)
# - [open street maps api](https://towardsdatascience.com/loading-data-from-openstreetmap-with-python-and-the-overpass-api-513882a27fd0)
# - [google maps api python](https://github.com/googlemaps/google-maps-services-python)
# - [pandas documentacion lectura json](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.json_normalize.html)
# - [us data gov](https://docs.ckan.org/en/latest/api/index.html)
Modulo1/4. Archivos Json y API's.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:pyvizenv] *
#     language: python
#     name: conda-env-pyvizenv-py
# ---

# # Credit Risk Resampling Techniques

# Silence library warnings so notebook output stays readable.
import warnings
warnings.filterwarnings('ignore')

import numpy as np
import pandas as pd
from pathlib import Path
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score
from imblearn.metrics import classification_report_imbalanced
from imblearn.metrics import geometric_mean_score

# # Read the CSV into DataFrame

# Load the data
file_path = Path('Resources/lending_data.csv')
df = pd.read_csv(file_path)
df.head()

# +
# Creating an instance of label encoder
label_encoder = LabelEncoder()
label_encoder.fit(df["loan_status"])

# Encode the loan_status as an integer: 1="low_risk", 0="high_risk"
df["loan_status"] = label_encoder.transform(df["loan_status"])

# Perform a binary encoding on the `homeowner` columns using the Pandas `get_dummies()` function.
df = pd.get_dummies(df, columns=["homeowner"])

# Re-order column "loan_status" to last:
cols = ['loan_size', 'interest_rate', 'borrower_income', 'debt_to_income',
        'num_of_accounts', 'derogatory_marks', 'total_debt',
        'homeowner_mortgage', 'homeowner_own', 'homeowner_rent',
        'loan_status']
df = df[cols]
df.head()
# -

# # Split the Data into Training and Testing

# Create our features
# YOUR CODE HERE
X = df.drop("loan_status", axis=1)

# Create our target
# YOUR CODE HERE
y = df["loan_status"]

X.describe()

# Check the balance of our target values
y.value_counts()

# +
# Create X_train, X_test, y_train, y_test
# YOUR CODE HERE
# NOTE(review): the split is not stratified; the class imbalance shown by
# Counter below is what motivates the resampling experiments -- confirm
# stratify=y was intentionally omitted.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
Counter(y_train)
# -

# ## Data Pre-Processing
#
# Scale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).

# Create the StandardScaler instance
from sklearn.preprocessing import StandardScaler
# YOUR CODE HERE
scaler = StandardScaler()

# Fit the Standard Scaler with the training data
# When fitting scaling functions, only train on the training dataset
# YOUR CODE HERE
X_scaler = scaler.fit(X_train)

# Scale the training and testing data
# YOUR CODE HERE
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)

# # Simple Logistic Regression

from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train_scaled, y_train)

# +
# Calculated the balanced accuracy score
y_pred = model.predict(X_test_scaled)
balanced_accuracy_score(y_test, y_pred)
# -

# Display the confusion matrix
confusion_matrix(y_test, y_pred)

# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))

# # Oversampling
#
# In this section, you will compare two oversampling algorithms to determine which algorithm results in the best performance. You will oversample the data using the naive random oversampling algorithm and the SMOTE algorithm. For each algorithm, be sure to complete the following steps:
#
# 1. View the count of the target classes using `Counter` from the collections library.
# 3. Use the resampled data to train a logistic regression model.
# 3. Calculate the balanced accuracy score from sklearn.metrics.
# 4. Print the confusion matrix from sklearn.metrics.
# 5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
#
# Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests

# ### Naive Random Oversampling

# +
# Resample the training data with the RandomOversampler
# YOUR CODE HERE
from imblearn.over_sampling import RandomOverSampler

ros = RandomOverSampler(random_state=1)
X_resampled, y_resampled = ros.fit_resample(X_train_scaled, y_train)

# View the count of target classes with Counter
# YOUR CODE HERE
Counter(y_resampled)

# +
# Train the Logistic Regression model using the resampled data
# YOUR CODE HERE
model_resampled = LogisticRegression(solver='lbfgs', random_state=1)
model_resampled.fit(X_resampled, y_resampled)
# -

# Calculated the balanced accuracy score
# YOUR CODE HERE
y_resampled_pred = model_resampled.predict(X_test_scaled)
balanced_accuracy_score(y_test, y_resampled_pred)

# Display the confusion matrix
# YOUR CODE HERE
confusion_matrix(y_test, y_resampled_pred)

# Print the imbalanced classification report
# YOUR CODE HERE
print(classification_report_imbalanced(y_test, y_resampled_pred))

# ### SMOTE Oversampling

# +
# Resample the training data with SMOTE
# YOUR CODE HERE
from imblearn.over_sampling import SMOTE

X_smote_resampled, y_smote_resampled = SMOTE(random_state=1, sampling_strategy=1.0).fit_resample(X_train_scaled, y_train)

# View the count of target classes with Counter
# YOUR CODE HERE
Counter(y_smote_resampled)
# -

# Train the Logistic Regression model using the resampled data
# YOUR CODE HERE
model_smote_resampled = LogisticRegression(solver='lbfgs', random_state=1)
model_smote_resampled.fit(X_smote_resampled, y_smote_resampled)

# Calculated the balanced accuracy score
# YOUR CODE HERE
y_smote_resampled_pred = model_smote_resampled.predict(X_test_scaled)
balanced_accuracy_score(y_test, y_smote_resampled_pred)

# Display the confusion matrix
# YOUR CODE HERE
confusion_matrix(y_test, y_smote_resampled_pred)

# Print the imbalanced classification report
# YOUR CODE HERE
print(classification_report_imbalanced(y_test, y_smote_resampled_pred))

# # Undersampling
#
# In this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the following steps:
#
# 1. View the count of the target classes using `Counter` from the collections library.
# 3. Use the resampled data to train a logistic regression model.
# 3. Calculate the balanced accuracy score from sklearn.metrics.
# 4. Display the confusion matrix from sklearn.metrics.
# 5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
#
# Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests

# +
# Resample the data using the ClusterCentroids resampler
# YOUR CODE HERE
from imblearn.under_sampling import ClusterCentroids

cc = ClusterCentroids(random_state=1)
X_cc_resampled, y_cc_resampled = cc.fit_resample(X_train_scaled, y_train)

# View the count of target classes with Counter
# YOUR CODE HERE
Counter(y_cc_resampled)
# -

# Train the Logistic Regression model using the resampled data
# YOUR CODE HERE
model_cc_resampled = LogisticRegression(solver='lbfgs', random_state=1)
model_cc_resampled.fit(X_cc_resampled, y_cc_resampled)

# Calculate the balanced accuracy score
# YOUR CODE HERE
y_cc_resampled_pred = model_cc_resampled.predict(X_test_scaled)
balanced_accuracy_score(y_test, y_cc_resampled_pred)

# Display the confusion matrix
# YOUR CODE HERE
confusion_matrix(y_test, y_cc_resampled_pred)

# Print the imbalanced classification report
# YOUR CODE HERE
print(classification_report_imbalanced(y_test, y_cc_resampled_pred))

# # Combination (Over and Under) Sampling
#
# In this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the following steps:
#
# 1. View the count of the target classes using `Counter` from the collections library.
# 3. Use the resampled data to train a logistic regression model.
# 3. Calculate the balanced accuracy score from sklearn.metrics.
# 4. Display the confusion matrix from sklearn.metrics.
# 5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
#
# Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests

# +
# Resample the training data with SMOTEENN
# YOUR CODE HERE
from imblearn.combine import SMOTEENN

smote_enn = SMOTEENN(random_state=1)
X_smoteenn_resampled, y_smoteenn_resampled = smote_enn.fit_resample(X_train_scaled, y_train)

# View the count of target classes with Counter
# YOUR CODE HERE
Counter(y_smoteenn_resampled)
# -

# Train the Logistic Regression model using the resampled data
# YOUR CODE HERE
model_smoteenn_resampled = LogisticRegression(solver='lbfgs', random_state=1)
model_smoteenn_resampled.fit(X_smoteenn_resampled, y_smoteenn_resampled)

# Calculate the balanced accuracy score
# YOUR CODE HERE
y_smoteenn_resampled_pred = model_smoteenn_resampled.predict(X_test_scaled)
balanced_accuracy_score(y_test, y_smoteenn_resampled_pred)

# Display the confusion matrix
# YOUR CODE HERE
confusion_matrix(y_test, y_smoteenn_resampled_pred)

# Print the imbalanced classification report
# YOUR CODE HERE
print(classification_report_imbalanced(y_test, y_smoteenn_resampled_pred))

# +
# Compare recall across all five models (micro-averaged).
recall = recall_score(y_test, y_pred, average='micro')
recall_oversampling = recall_score(y_test, y_resampled_pred, average='micro')
recall_smote = recall_score(y_test, y_smote_resampled_pred, average='micro')
recall_cc = recall_score(y_test, y_cc_resampled_pred, average='micro')
recall_smoteenn = recall_score(y_test, y_smoteenn_resampled_pred, average='micro')
print(f"Recall score for Simple Logistic Regression: {recall}")
print(f"Recall score for Random Over Sampling: {recall_oversampling}")
print(f"Recall score for Over Sampling Smote: {recall_smote}")
print(f"Recall score for Under Sampling ClusterCentroids: {recall_cc}")
print(f"Recall score for Combination Sampling SMOTEENN: {recall_smoteenn}")

# +
# Compare geometric mean scores across all five models (micro-averaged).
geo = geometric_mean_score(y_test, y_pred, average='micro')
geo_oversampling = geometric_mean_score(y_test, y_resampled_pred, average='micro')
geo_smote = geometric_mean_score(y_test, y_smote_resampled_pred, average='micro')
geo_cc = geometric_mean_score(y_test, y_cc_resampled_pred, average='micro')
geo_smoteenn = geometric_mean_score(y_test, y_smoteenn_resampled_pred, average='micro')
print(f"Geometric mean score for Simple Logistic Regression: {geo}")
print(f"Geometric mean score for Random Over Sampling: {geo_oversampling}")
print(f"Geometric mean score for Over Sampling Smote: {geo_smote}")
print(f"Geometric mean score for Under Sampling ClusterCentroids: {geo_cc}")
print(f"Geometric mean score for Combination Sampling SMOTEENN: {geo_smoteenn}")
# -

# # Final Questions
#
# 1. Which model had the best balanced accuracy score?
#
#    SMOTE Oversampling, Naive Random Oversampling, Combination has the same accuracy score of 0.9934649587814939 using random_state=1 and are the best out of all model
#
# 2. Which model had the best recall score?
#
#    SMOTE Oversampling and Naive Random Oversampling has the same recall score using random_state=1 and are the best out of all model.
#
# 3. Which model had the best geometric mean score?
#
#    SMOTE Oversampling, Naive Random Oversampling, Combination has the same geometric mean score using random_state=1 and are the best out of all model.
#
Starter_Code/credit_risk_resampling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Introduction

# From v2.8.0, pymatgen comes with a fairly robust system of managing units. In essence, subclasses of float and numpy array is provided to attach units to any quantity, as well as provide for conversions. These are loaded at the root level of pymatgen and some properties (e.g., atomic masses, final energies) are returned with attached units. This demo provides an outline of some of the capabilities.

# Let's start with some common units, like Energy.

import pymatgen.core as mg

# The constructor is simply the value + a string unit.
e = mg.units.Energy(1000, "Ha")
# Let's perform a conversion. Note that when printing, the units are printed as well.
print(f"{e} = {e.to('eV')}")
# To check what units are supported
print(f"Supported energy units are {e.supported_units}")

# Units support all functionality that is supported by floats. Unit combinations are automatically taken care of.

dist = mg.units.Length(65, "mile")
time = mg.units.Time(30, "min")
speed = dist / time
print(f"The speed is {speed}")
# Let's do a more sensible unit.
print(f"The speed is {speed.to('mile h^-1')}")

# Note that complex units are specified as **space-separated powers of units**. Powers are specified using "^". E.g., "kg m s^-1". Only **integer powers** are supported.

# Now, let's do some basic science.

g = mg.units.FloatWithUnit(9.81, "m s^-2")  # Acceleration due to gravity
m = mg.units.Mass(2, "kg")
h = mg.units.Length(10, "m")
print(f"The force is {m * g}")
# Fix: the original message read "The potential energy is force is ...".
print(f"The potential energy is {(m * g * h).to('J')}")

# Some highly complex conversions are possible with this system. Let's do some made up units. We will also demonstrate pymatgen's internal unit consistency checks.

# Consistency fix: use the same `mg.units` namespace as every other
# constructor in this notebook (the original used bare `mg.FloatWithUnit`).
made_up = mg.units.FloatWithUnit(100, "Ha^3 bohr^-2")
print(made_up.to("J^3 ang^-2"))

try:
    made_up.to("J^2")
except mg.units.UnitError as ex:
    print(ex)

# For arrays, we have the equivalent EnergyArray, ... and ArrayWithUnit classes. All other functionality remain the same.

dists = mg.units.LengthArray([1, 2, 3], "mile")
times = mg.units.TimeArray([0.11, 0.12, 0.23], "h")
print(f"Speeds are {dists / times}")

# ## This concludes the tutorial on units in pymatgen.
notebooks/2013-01-01-Units.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <font size="+5">#02 | Dissecting the Object with Pandas DataFrame Properties</font> # - Subscribe to my [Blog ↗](https://blog.pythonassembly.com/) # - Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄 # ## All that glitters is not gold # # > - Not all objects can call the same functions # > - Even though they may store the same information # ### `list` # > - Create a list with your math `grades` # > - Compute the mean # > - You cannot do the mean # > - But you could `sum()` and `len()` # > - And divide the sum by the number of exams you did # > - [ ] Isn't it an `object` that could calculate the `mean()`? # ### `Series` # > - Use the `.` + `[tab]` key to see the `functions/methods` of the object # ## How to Access the `items` of an `Object` # ### The `list` # > - Create a list of your best friends # > - Access the 2nd element `'pepe'` ↓ # ### The `dict` # > - Access the 2nd element `'pepe'` ↓ # ## Store the information in Python `objects` for the Best Tennis Players # > - income # > - titles # > - grand slams # > - turned professional # > - wins # > - losses # ### Create a `dictionary` for Roger Federer # + [markdown] tags=[] # ### Create a `dictionary` for <NAME> # - # ### Create a `dictionary` for <NAME> # ### How much wealth did all of them earned? # > - You may put all of them into a `list` # > - And `sum()` the `wealth` # > - The `sum()` is not an action # > - that a simple object `list` can perform # > - [ ] Could we convert the list into a # > - more powerful object # > - that could compute the `sum()`? # > - Access the `wealth` column # > - and compute the `sum()` # > - [ ] Which type of `object` is the table? # > - [ ] What else can we do with this `object`? 
# ## Can we select specific parts of the `DataFrame`? # > - [ ] names of rows # > - [ ] names of columns # > - [ ] number of rows & columns # ## Vocabulary Recap # # 1. object/ # 2. function/method # 3. parameter/argument # 4. library # ## A practical case # # > - Retrieve information from an url # > - and convert it into a DataFrame # > - to operate with the Data # ### Retrieve the Information from an `url` # https://github.com/jsulopz/data # > - Find the `function()` that gets the content from an `url` # > - Is the object just `<Response [200]>` # > - Or may it contain more information/data? # > - How can you access the data we see [here](https://raw.githubusercontent.com/jsulopz/data/main/best_tennis_players_stats.json) # > - Is there a way to get the data from the `url` # > - just like ↓ # > - and not this ↓ # > - Apply the discipline to find a `function()` within the object # ### Recap # ### Shouldn't it be easier? # > - Apply the discipline to find `function()` within some library # > - And now calculate the `sum()` of the `income`
I Resolving Python with Data Science/04_Transforming Basic Objects into the Powerful DataFrame/02session_disecting-the-dataframe-object.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Transportation (supply/demand assignment) problem modelled as a search
# problem, plus DFS/BFS drivers instrumented to print frontier sizes.
# NOTE(review): `search` appears to be the AIMA-python `search` module
# (Problem/Node API) -- confirm it is on the import path.
import numpy as np
from search import Problem
import sys


class EstadoTransporte(object):
    # State: remaining supplies (ofertas), remaining demands (demandas),
    # and the assignment matrix built so far (asignaciones).
    def __init__(self, dimension, ofertas, demandas,
                 asignaciones=None, costos=None):
        """dimension is the dimension of the matrix (it must be square);
        ofertas (supplies) and demandas (demands) must be lists."""
        # Only the balanced problem (total supply == total demand) is supported.
        if sum(ofertas) != sum(demandas):
            raise NotImplementedError('Todavia no funciona para desbalances')
        self.dimension = dimension
        # Supplies as a column vector, demands as a row vector.
        self.ofertas = np.asanyarray([ofertas]).T
        self.demandas = np.asanyarray([demandas])
        self.total_sin_asignar = sum(self.demandas[0])
        if asignaciones is None:
            self.asignaciones = np.zeros((dimension,dimension))
        else:
            self.asignaciones = asignaciones
        if costos is None:
            self.costos = np.ones((dimension,dimension))
        else:
            self.costos = costos

    def asignar(self, fil, col, cant):
        # Ship `cant` units from supplier `fil` to consumer `col`,
        # updating remaining supply, demand, and the unassigned total.
        self.asignaciones[fil,col] = cant
        self.ofertas[fil,0] -= cant
        self.demandas[0,col] -= cant
        self.total_sin_asignar -= cant

    def of_dem_disponibles(self):
        """Return (indices of suppliers with remaining supply,
        indices of consumers with remaining demand)."""
        ixs_ofertas_disp = np.where(~(self.ofertas==0))[0]
        ixs_demandas_disp = np.where(~(self.demandas==0))[1]
        return ixs_ofertas_disp, ixs_demandas_disp

    def disponibles(self, ret_generator=True):
        # NOTE(review): because this function contains `yield`, calling it
        # with ret_generator=False STILL returns a generator object -- the
        # `return array` only sets StopIteration.value. Likely unintended;
        # confirm before relying on the ret_generator=False branch.
        f,c = self.of_dem_disponibles()
        if not ret_generator:
            return np.array(np.meshgrid(f,c)).T.reshape(-1,2)
        else:
            # doubtful that yield helps at all here, since the full matrix
            # is built anyway
            for pos in np.array(np.meshgrid(f,c)).T.reshape(-1,2):
                yield pos

    def acciones_posibles(self):
        # Generator of actions (fil, col, cant): assign to each available
        # supplier/consumer pair the maximum feasible quantity.
        f,c = self.of_dem_disponibles()
        # does it even make sense to use a generator after calling this?
        print(" Expandiendo nodo...")
        cont = 1
        todas_combinaciones = np.array(np.meshgrid(f,c)).T.reshape(-1,2)
        print(" Todas las combinaciones (shape):",todas_combinaciones.shape)
        print(" Tam. (bytes) de todas las combinaciones (shape):",
              sys.getsizeof(todas_combinaciones))
        for x,y in np.array(np.meshgrid(f,c)).T.reshape(-1,2):
            print(" Nodos enviados desde este estado:",cont)
            yield x,y,min(self.ofertas[x,0], self.demandas[0,y])
            cont += 1

    @classmethod
    def copiar(cls, obj):
        # Alternate constructor: copy of `obj`'s supplies/demands/assignments.
        # NOTE(review): `costos` is not propagated -- the copy falls back to
        # the default all-ones cost matrix; confirm that is intended.
        return cls(obj.demandas.shape[1],
                   np.copy(obj.ofertas.flatten()),
                   np.copy(obj.demandas.flatten()),
                   np.copy(obj.asignaciones))

    def __repr__(self):
        # Print demands as a header row, then (supply, assignment-row) pairs.
        s = ''
        s += ' '*len('(array([0]), array') + np.array2string(self.demandas.astype(float),separator=', ') + '\n'
        s += '\n'.join([str(x) for x in zip(self.ofertas,self.asignaciones)])
        return s

    # Equality and hashing depend only on the assignment matrix, so states
    # reached by different action orders but with equal assignments coincide.
    def __hash__(self):
        return hash(self.asignaciones.tobytes())

    def __eq__(self, other):
        return self.__class__ == other.__class__ and \
               np.array_equal(self.asignaciones, other.asignaciones)


class Iop2Problem(Problem):
    # Search-problem wrapper delegating to EstadoTransporte.
    def actions(self, state):
        for accion in state.acciones_posibles():
            yield accion

    def result(self, state, action):
        # States are immutable from the search's point of view: copy, then assign.
        nuevo_estado = state.__class__.copiar(state)
        fil, col, cant = action
        nuevo_estado.asignar(fil,col,cant)
        return nuevo_estado

    def goal_test(self, state):
        # Goal: nothing left to assign.
        return state.total_sin_asignar <= 0


estado_inicial = EstadoTransporte(4,[4,2,4,5],[2,3,6,4])
problema_iop2 = Iop2Problem(estado_inicial)
problema_iop2

from search import depth_first_graph_search, Node
from collections import deque


class NodeIter:
    """The original Node, but expand returns a generator instead of a list.

    A node in a search tree. Contains a pointer to the parent (the node
    that this is a successor of) and to the actual state for this node.
    Note that if a state is arrived at by two paths, then there are two
    nodes with the same state. Also includes the action that got us to
    this state, and the total path_cost (also known as g) to reach the
    node. Other functions may add an f and h value; see
    best_first_graph_search and astar_search for an explanation of how
    the f and h values are handled. You will not need to subclass this
    class."""

    def __init__(self, state, parent=None, action=None, path_cost=0):
        """Create a search tree Node, derived from a parent by an action."""
        self.state = state
        self.parent = parent
        self.action = action
        self.path_cost = path_cost
        self.depth = 0
        if parent:
            self.depth = parent.depth + 1

    def __repr__(self):
        return "<Node {}>".format(self.state)

    def __lt__(self, node):
        return self.state < node.state

    def expand(self, problem):
        """List the nodes reachable in one step from this node."""
        # Lazy version: children are yielded one at a time.
        for action in problem.actions(self.state):
            yield self.child_node(problem, action)

    def child_node(self, problem, action):
        """[Figure 3.10]"""
        next_state = problem.result(self.state, action)
        next_node = NodeIter(next_state, self, action,
                             problem.path_cost(self.path_cost, self.state,
                                               action, next_state))
        return next_node

    def solution(self):
        """Return the sequence of actions to go from the root to this node."""
        return [node.action for node in self.path()[1:]]

    def path(self):
        """Return a list of nodes forming the path from the root to this node."""
        node, path_back = self, []
        while node:
            path_back.append(node)
            node = node.parent
        return list(reversed(path_back))

    # We want for a queue of nodes in breadth_first_graph_search or
    # astar_search to have no duplicated states, so we treat nodes
    # with the same state as equal. [Problem: this may not be what you
    # want in other contexts.]

    def __eq__(self, other):
        return isinstance(other, NodeIter) and self.state == other.state

    def __hash__(self):
        return hash(self.state)


# +
import types


class ValGenList(object):
    """A list that is aware that it may contain generators...

    If pop() is called and the last element is a generator, pop() returns
    the next element produced by that generator. !!! careful: it is not
    guaranteed to detect that the list is really empty.
    """
    def __init__(self):
        self._L = []

    def pop(self):
        #raise NotImplementedError("Esta lista debe funcionar como iterador")
        if len(self._L) == 0:
            raise IndexError("pop from empty list")
        if isinstance(self._L[-1], types.GeneratorType):
            #print("entré a isinstance") #:deb:
            try:
                return next(self._L[-1])
            except StopIteration:
                #print("entré a stopiter") #:deb:
                # Exhausted generator: discard it and retry on what remains.
                self._L.pop()
                return self.pop()
        else:
            #print("entré a else") #:deb:
            return self._L.pop()

    def append(self, elem):
        self._L.append(elem)
        return

    def extend(self, other):
        # A generator is stored whole (consumed lazily by pop); any other
        # iterable is spliced in eagerly like list.extend.
        if isinstance(other, types.GeneratorType):
            self._L.append(other)
        else:
            self._L.extend(other)

    """def __next__(self):
        if len(self._L) == 0:
            raise StopIteration
        if isinstance(self._L[-1], types.GeneratorType):
            try:
                print("Intentamos llamar al siguiente elemento") #:deb:
                return next(self._L[-1])
            except StopIteration:
                #esto debe manejarse en caso de que el iterador vacio
                #no sea el ultimo elemento
                self._L.pop()
                if len(self._L) == 0:
                    raise
                return self.__next__()
            except Exception as e:
                print("Ay :(")
                raise
        else:
            try:
                return self._L.pop()
            except:
                raise StopIteration"""

    def __iter__(self):
        return self

    def __contains__(self, key):
        # NOTE(review): membership checks against _L compare nodes to stored
        # generator objects as well -- confirm that is acceptable.
        return key in self._L

    def __bool__(self):
        return bool(self._L)

    def __len__(self):
        return len(self._L)

    def __str__(self):
        return self._L.__str__()

    def __repr__(self):
        return self._L.__repr__()
# -

import sys


def lazy_depth_first_graph_search(problem, _print=False):
    """
    (Uses a "lazy" stack: instead of expand returning every child at once,
    the frontier stores a generator and yields the next child on demand.)
    The drawback is that it does not check whether a child is already on
    the frontier.

    Search the deepest nodes in the search tree first. Search through the
    successors of a problem to find a goal. The argument frontier should
    be an empty stack. Does not get trapped by loops. If two paths reach
    a state, only use the first one. [Figure 3.7]"""
    frontier = ValGenList()
    frontier.append(NodeIter(problem.initial))  # Stack
    explored = set()
    max_tam_frontera = sys.getsizeof(frontier)
    while frontier:
        max_tam_frontera = max(max_tam_frontera, sys.getsizeof(frontier._L))
        print("frontier len:", len(frontier))
        print("frontier size in bytes:",sys.getsizeof(frontier._L))
        print("frontier:",frontier)
        try:
            node = frontier.pop()
        except IndexError:
            break
        except StopIteration:
            break
        print("node:", node)
        print()
        if problem.goal_test(node.state):
            print("max_tam_frontera (bytes):", max_tam_frontera)
            print("tam_explorados:", len(explored))
            return node
        explored.add(node.state)
        # The filtering itself is lazy: it runs when the frontier pops.
        generador_hijos = (child for child in node.expand(problem)
                           if child.state not in explored and child not in frontier)
        frontier.extend(generador_hijos)
    print("max_tam_frontera (bytes):", max_tam_frontera)
    print("tam_explorados:", len(explored))
    return None


lazy_depth_first_graph_search(problema_iop2)

gol3 = lazy_depth_first_graph_search(problema_iop2)

import sys


def depth_first_graph_search_print(problem):
    """Search the deepest nodes in the search tree first. Search through the
    successors of a problem to find a goal. The argument frontier should be
    an empty queue. Does not get trapped by loops. If two paths reach a
    state, only use the first one. [Figure 3.7]"""
    # Eager baseline for comparison with the lazy variant above.
    frontier = [(Node(problem.initial))]  # Stack
    explored = set()
    max_tam_frontera = sys.getsizeof(frontier)
    while frontier:
        print("frontier:",frontier)
        print("frontier len:",len(frontier))
        print("frontier size in bytes:",sys.getsizeof(frontier))
        #max_tam_frontera = max(max_tam_frontera, len(frontier))
        max_tam_frontera = max(max_tam_frontera, sys.getsizeof(frontier))
        node = frontier.pop()
        print("node:",node)
        print()
        if problem.goal_test(node.state):
            print("max_tam_frontera (bytes):", max_tam_frontera)
            print("tam_explorados:", len(explored))
            return node
        explored.add(node.state)
        frontier.extend(child for child in node.expand(problem)
                        if child.state not in explored and child not in frontier)
    print("max_tam_frontera (bytes):", max_tam_frontera)
    print("tam_explorados:", len(explored))
    return None


# +
def breadth_first_graph_search_print(problem):
    """[Figure 3.11]"""
    # BFS baseline; tracks the maximum frontier length instead of bytes.
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        print("max_tam_frontera:", 0)
        print("tam_explorados:", 1)
        return node
    frontier = deque([node])
    explored = set()
    max_tam_frontera = 1
    while frontier:
        max_tam_frontera = max(max_tam_frontera, len(frontier))
        node = frontier.popleft()
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                if problem.goal_test(child.state):
                    print("max_tam_frontera:", max_tam_frontera)
                    print("tam_explorados:", len(explored))
                    return child
                frontier.append(child)
    print("max_tam_frontera:", max_tam_frontera)
    print("tam_explorados:", len(explored))
    return None
# -

gol = depth_first_graph_search_print(problema_iop2)

gol

gol2 = breadth_first_graph_search_print(problema_iop2)

gol2

sol = gol.solution()[::-1]
sol

sol = gol2.solution()[::-1]
sol

sol = gol3.solution()
sol

# Replay the found action sequence from the initial state, printing each step.
estado_actual = problema_iop2.initial
print(estado_actual)
print()
for accion in sol:
    print("accion:",accion)
    estado_siguiente = problema_iop2.result(estado_actual,accion)
    estado_actual = estado_siguiente
    print(estado_actual)
    print()
iop2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # From a design perspective, deep hierarchies of classes can be cumbersome and make change a lot harder since the entire hierarchy has to be taken into account. Python offers a few mechanism to avoid this, and make the class desing leaner. # In order to ensure that all cells in this notebook can be evaluated without errors, we will use `try`-`except` to catch exceptions. To show the actual errors, we need to print the backtrace, hence the following import. from traceback import print_exc # # Duck typing # The idea is that if classes implement object methods with the same signature and semantics, that functionality can be used, regardless of the relationship between the classes, if any. If it looks like a duck, swims like a duck and quacks like a duck, it is probably a duck. # We define two classes that serve completely different purposes. The only thing they share is that they make a sound, and the relevant method for both is `make_sound`. class Duck: _sound: str = 'quack' species: str def make_sound(self): return self._sound def __init__(self, species): self.species = species class Timer: _sound: str = 'beep' time: int def make_sound(self): return self._sound def __init__(self, time): self.time = time # Next, we add and instance of each to a list. stuff = [Duck('mandarin'), Timer(10)] # We can iterate over the list, and regardless of the object's class, invoke the `make_sound` method. for item in stuff: print(f'{type(item)} says {item.make_sound()}') # Note that the sound faculty of these classes is not derived from a common ancestor class by inheritance. # # Mix-in # If the implementation of the common functionality is the same for a number of classes, it is worth defining a mix-in class that defines the implementation. 
# In the examples above, the `make_sound` method implementation is identical
# for the `Duck` and `Timer` class.  Hence we can move the implementation to
# its own class `SoundMaker`.  Note that this class has no `__init__` method,
# and needs none.

# NOTE(review): imported here as well so this section also runs when the
# earlier cells have not been executed; re-importing is a no-op otherwise.
from traceback import print_exc


class SoundMaker:
    """Mix-in that provides `make_sound` for any class defining `_sound`."""

    def make_sound(self):
        """Return this object's `_sound`; raise ValueError if it has none."""
        if hasattr(self, '_sound'):
            return self._sound
        else:
            raise ValueError(f'{type(self)} does not make sound')


# The `Duck`, `Timer` and `Dog` classes now inherit from `SoundMaker`, but
# `Dog` doesn't define its sound attribute.


class Duck(SoundMaker):
    _sound: str = 'quack'
    species: str

    def __init__(self, species):
        self.species = species


class Timer(SoundMaker):
    _sound: str = 'beep'
    time: int

    def __init__(self, time):
        self.time = time


class Dog(SoundMaker):
    # No `_sound` attribute: calling make_sound() on a Dog raises ValueError.
    name: str

    def __init__(self, name):
        self.name = name


stuff = [Duck('mandarin'), Timer(5)]

for item in stuff:
    print(f'{type(item)} says {item.make_sound()}')

# Since the `Dog` has no `_sound`, the mix-in method raises an exception.

dog = Dog('fido')
try:
    print(dog.make_sound())
except ValueError:
    print_exc()
source-code/object-orientation/avoiding_class_hierarchies.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Setup the Environment # ## With miniconda # Conda is an open-source package management system and environment management system that runs on Windows, macOS, and Linux. Conda quickly installs, runs, and updates packages and their dependencies. Conda easily creates, saves, loads, and switches between environments on your local computer. # Miniconda is a free minimal installer for conda. It is a small, bootstrap version of Anaconda that includes only conda, Python, the packages they depend on, and a small number of other useful packages, including pip, zlib and a few others. # - [Installing miniconda](https://docs.conda.io/en/latest/miniconda.html#) # - [Getting started with conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html#starting-conda) # - Create, then activate the environment: # # ```conda create -n introml``` # # `conda activate introml` # # # - Add community channel [conda-forge](https://conda-forge.org/): # # `conda config --add channels conda-forge` # # `conda config --set channel_priority strict` # # # - Install packages in your active environment. Be careful! Only use the `conda install` command. Packages from different repositories may conflict and your environment will break. # # `conda install jupyterlab pandas seaborn scikit-learn` # # # ## With virtualenv # If you are familiar with Python and prefer virtualenv for managing your environments, you can use the following commands to set up the environment: # # `cd "path/to/project_folder"` # # `python -m venv env` # # `source env/bin/activate` # # `pip install jupyterlab pandas seaborn scikit-learn` # ## Run jupyterlab # Use the `jupyter lab` command in the terminal to run JupyterLab, # or `jupyter notebook` to run jupyter notebook
notebooks/setup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="KgNWMAfrWz8r" colab_type="code" colab={} # http://pytorch.org/ from os.path import exists from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) # cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/' accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu' # + id="dzhILRnNVS-a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 125.0} outputId="d29a4372-2558-4dad-8417-3469ccdd0ce0" ''' code by <NAME>(<NAME>) @graykode ''' import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import torch.nn.functional as F dtype = torch.FloatTensor # Text-CNN Parameter embedding_size = 2 # n-gram sequence_length = 3 num_classes = 2 # 0 or 1 filter_sizes = [2, 2, 2] # n-gram window num_filters = 3 # 3 words sentences (=sequence_length is 3) sentences = ["i love you", "he loves me", "she likes baseball", "i hate you", "sorry for that", "this is awful"] labels = [1, 1, 1, 0, 0, 0] # 1 is good, 0 is not good. 
# Build the vocabulary from the training sentences.
word_list = " ".join(sentences).split()
word_list = list(set(word_list))
word_dict = {w: i for i, w in enumerate(word_list)}
vocab_size = len(word_dict)

# Encode each sentence as a sequence of word indices.
inputs = []
for sen in sentences:
    inputs.append(np.asarray([word_dict[n] for n in sen.split()]))

targets = []
for out in labels:
    targets.append(out)  # To using Torch Softmax Loss function

# NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper; plain
# tensors carry autograd state themselves.
input_batch = torch.LongTensor(inputs)
target_batch = torch.LongTensor(targets)


class TextCNN(nn.Module):
    """Kim-style Text-CNN: embedding, parallel n-gram convolutions,
    max-over-time pooling, and a final linear projection to the classes."""

    def __init__(self):
        super(TextCNN, self).__init__()
        self.num_filters_total = num_filters * len(filter_sizes)
        # Fix: keep .type(dtype) *inside* nn.Parameter(...).  The original
        # applied .type() to the Parameter, which returns a plain tensor, so
        # W/Weight/Bias were never registered and model.parameters() was
        # empty -- the optimizer trained nothing.
        self.W = nn.Parameter(
            torch.empty(vocab_size, embedding_size).uniform_(-1, 1).type(dtype))
        self.Weight = nn.Parameter(
            torch.empty(self.num_filters_total, num_classes).uniform_(-1, 1).type(dtype))
        self.Bias = nn.Parameter((0.1 * torch.ones([num_classes])).type(dtype))
        # Fix: build the conv layers once here.  The original constructed
        # fresh nn.Conv2d layers inside forward(), so their weights were
        # re-randomized on every call and never trained.
        # conv : [input_channel(=1), output_channel(=3), (filter_height, filter_width), bias_option]
        self.convs = nn.ModuleList(
            nn.Conv2d(1, num_filters, (filter_size, embedding_size), bias=True)
            for filter_size in filter_sizes)

    def forward(self, X):
        embedded_chars = self.W[X]  # [batch_size, sequence_length, embedding_size]
        # add channel(=1): [batch, channel(=1), sequence_length, embedding_size]
        embedded_chars = embedded_chars.unsqueeze(1)

        pooled_outputs = []
        for filter_size, conv in zip(filter_sizes, self.convs):
            h = F.relu(conv(embedded_chars))
            # mp : ((filter_height, filter_width))
            mp = nn.MaxPool2d((sequence_length - filter_size + 1, 1))
            # pooled : [batch_size(=6), output_height(=1), output_width(=1), output_channel(=3)]
            pooled = mp(h).permute(0, 3, 2, 1)
            pooled_outputs.append(pooled)

        # [batch_size(=6), output_height(=1), output_width(=1), output_channel(=3) * 3]
        h_pool = torch.cat(pooled_outputs, len(filter_sizes))
        # [batch_size(=6), output_height * output_width * (output_channel * 3)]
        h_pool_flat = torch.reshape(h_pool, [-1, self.num_filters_total])
        model = torch.mm(h_pool_flat, self.Weight) + self.Bias  # [batch_size, num_classes]
        return model


model = TextCNN()

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training
for epoch in range(5000):
    optimizer.zero_grad()
    output = model(input_batch)

    # output : [batch_size, num_classes], target_batch : [batch_size] (LongTensor, not one-hot)
    loss = criterion(output, target_batch)
    if (epoch + 1) % 1000 == 0:
        # loss.item() extracts the Python float from the 0-dim loss tensor.
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss.item()))

    loss.backward()
    optimizer.step()

# Test
test_text = 'sorry hate you'
tests = [np.asarray([word_dict[n] for n in test_text.split()])]
test_batch = torch.LongTensor(tests)

# Predict
predict = model(test_batch).data.max(1, keepdim=True)[1]
if predict[0][0] == 0:
    print(test_text,"is Bad Mean...")
else:
    print(test_text,"is Good Mean!!")
2-1.TextCNN/TextCNN_Torch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- l = ['others', 'others', 'others', 'others', 'docker', 'docker', 'elasticsearch_template', 'elasticsearch_template', 'elasticsearch_template', 'elasticsearch_template', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'java', 'js', 'python', 'python', 'python', 'python', 'python', 'python', 'python', 'python', 'python', 'python', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'shells', 'simulate_keypress', 'simulate_keypress', 'sql', 'sql', 'sql', ] dirlist= list(set(l)) flist = ["./others/flush_dns_cache.md", "./others/jboss_dynamic_host_rewrite.xml", "./others/jupyter_shortcuts.md", "./others/nusoap_utf8_response.php", "./docker/docker_commads.sh", "./docker/docker_swarm.sh", "./elasticsearch_template/ngram_template.json", "./elasticsearch_template/ngram_template_curl.sh", "./elasticsearch_template/phonetic_analyzer_template.json", "./elasticsearch_template/phonetic_analyzer_template_curl.sh", "./java/CheckPolymorphism.java", "./java/FunFunctionExperiment.java", "./java/GsonHelper.java", "./java/java.gitignore.collection", "./java/maven_copy_dependencies_to_lib.xml", "./java/maven_shade_manifest_main_class.xml", "./java/maven_single_fat_jar.xml", "./java/mysql_connection_for_spring.properties", "./java/SieveGenerator.java", "./java/Singleton.java", "./java/SmtpMailManager.java", "./java/spring-boot-swagger-config.md", "./java/ThreadSafeSingleton.java", "./java/Trie.java", "./js/detect_change_in_form.js", "./python/base64EncDec.py", "./python/checkAvailIp.py", "./python/generatePermutationCombination.py", 
"./python/improve_pure_functool.py", "./python/inversion_count.py", "./python/kill_java_process_on_windows.py", "./python/local_fileserver.py", "./python/merge_sort.py", "./python/OraclePagination.py", "./python/trie.py", "./shells/append_file_to_another.sh", "./shells/check_meltdown_ubuntu.sh", "./shells/check_start_process.pl", "./shells/delete_file_and_folder_with_pattern.sh", "./shells/deploy_jar_to_maven.sh", "./shells/exportImportMySQLdb.sh", "./shells/find_file_with_size.sh", "./shells/find_list_of_files_directory.sh", "./shells/find_text_in_folder.sh", "./shells/folder_info.sh", "./shells/install_python2_krnl_conda.sh", "./shells/loop_between_files_in_folder.sh", "./shells/lscpu_monitor.sh", "./shells/mysql_create_user.sh", "./shells/mysql_install_config.sh", "./shells/os_info.sh", "./shells/process_info.sh", "./shells/rsync_copy_exclude_dir.sh", "./shells/sha256_csr_with_openssl.sh", "./shells/smlink_win.bat", "./shells/start_ubuntu_in_terminal.sh", "./shells/update_server_time_from_ntp.sh", "./shells/video_to_audio_ubuntu.sh", "./shells/word_count_in_a_file.sh", "./simulate_keypress/simulate.html", "./simulate_keypress/simulate_keypress.js", "./sql/detect_duplicate_value.sql", "./sql/grantAccessMysql.sql", "./sql/update_column_seq_val.sql",] # + dirlist= list(set(l)) flist = sorted(flist) flist current = "" for f in flist: for d in dirlist: if f.startswith("./{0}".format(d)): current = d print("\n### [" + current + "](./" + current + "/)\n") dirlist.remove(d) fileName = f.strip()[len(current)+3:] print("* [{0}]({1})".format(fileName, f)) # -
python/scripts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Deciscion Trees and Ensemble Methods # + slideshow={"slide_type": "slide"} # %matplotlib inline import pandas as pd import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier from sklearn.tree import plot_tree, export_text # + slideshow={"slide_type": "slide"} bg_df = pd.read_csv('data/boardgames.csv') bg_df.sample(5) # + slideshow={"slide_type": "slide"} dtc = DecisionTreeClassifier(max_depth = 3) # Split the sample only three times. # + slideshow={"slide_type": "slide"} x_names = ['max_players', 'min_players', 'min_playtime', 'max_playtime', 'min_age'] dtc.fit(bg_df[x_names], bg_df['quality_game']) # + slideshow={"slide_type": "slide"} plt.figure(figsize=(16,8)) plot_tree(dtc, feature_names = x_names, fontsize = 10, filled =True) # + slideshow={"slide_type": "slide"} print(export_text(dtc, feature_names = x_names)) # + slideshow={"slide_type": "slide"} from sklearn.ensemble import RandomForestClassifier # + slideshow={"slide_type": "slide"} rf = RandomForestClassifier(max_depth =3 ) rf # + slideshow={"slide_type": "slide"} rf.fit(bg_df[x_names], bg_df['quality_game']) # - len(rf.estimators_) print(export_text(rf.estimators_[5], feature_names = x_names)) plt.figure(figsize=(16,8)) plot_tree(rf.estimators_[-2], feature_names = x_names, fontsize = 10, filled =True) # + slideshow={"slide_type": "slide"} imp = pd.DataFrame(rf.feature_importances_, index = x_names) imp # - # <div class="alert alert-info"> # <h3> Your Turn</h3> # <p> Run the model again chaning the max_depth parameter. What does it mean to change this parameter? How do the results vary? # </div> # # # <div class="alert alert-info"> # <h3> Your Turn</h3> # <p> Find the documentation for this function. What other parameters are available? Add one to your model. 
# </div> # # # # <div class="alert alert-info"> # <h3> Your Turn</h3> # <p> Produce predicted values from your model and evaluate the accuracy score. # </div> # # # # # ## Time to supersize things # + categories = ['category_cardgame', 'category_wargame', 'category_fantasy', 'category_dice', 'category_partygame', 'category_fighting', 'category_sciencefiction', 'category_abstractstrategy', 'category_economic', 'category_childrensgame', 'category_worldwarii', 'category_bluffing', 'category_animals', 'category_humor', 'category_actiondexterity', 'category_adventure', 'category_moviestvradiotheme', 'category_medieval', 'category_deduction', 'category_miniatures'] mechanics = ['mechanic_dicerolling', 'mechanic_handmanagement', 'mechanic_hexandcounter', 'mechanic_setcollection', 'mechanic_variableplayerpowers', 'mechanic_none', 'mechanic_tileplacement', 'mechanic_modularboard', 'mechanic_carddrafting', 'mechanic_rollspinandmove', 'mechanic_areacontrolareainfluence', 'mechanic_auctionbidding', 'mechanic_simulation', 'mechanic_areamovement', 'mechanic_simultaneousactionselection', 'mechanic_actionpointallowancesystem', 'mechanic_cooperativeplay', 'mechanic_pointtopointmovement', 'mechanic_partnerships', 'mechanic_memory'] # - x_names = ['complexity', 'max_players', 'min_players', 'min_playtime', 'max_playtime', 'min_age'] many_xs = x_names + mechanics + categories # + slideshow={"slide_type": "slide"} from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline param_dist = {"max_features": [4, 7], "min_samples_split": [10], "n_estimators" : [100, 200]} rfgs = GridSearchCV( RandomForestClassifier(), param_dist, cv = 5, n_jobs = -1, verbose=1 ) # + slideshow={"slide_type": "slide"} rfgs.fit(bg_df[many_xs], bg_df['quality_game']) # + slideshow={"slide_type": "slide"} rfgs.best_estimator_ # + slideshow={"slide_type": "slide"} rf_best = rfgs.best_estimator_ pd.DataFrame(rf_best.feature_importances_, index = many_xs).sort_values(by = 0, 
ascending=False) # - print(export_text(rf_best.estimators_[11], feature_names = many_xs)) # <div class="alert alert-info"> # <h3> Your Turn</h3> # <p> Work with your group to find a best fitting model. Compare the accuracy with of logistic regression. # </div> # # # # # # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-info"> # <h3> Bonus Challenge</h3> # # <p> Bonus challenge: Use both features in the data set and ones you construct from a topic model! # # </div> # # -
Notebooks/Uncategorized/11_Random-Forest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Set parameters input_fld = 'input_fld_path' weight_file = 'kerasmodel_file_name located inside input_fld' num_output = 1 write_graph_def_ascii_flag = True prefix_output_node_names_of_final_network = 'output_node' output_graph_name = 'constant_graph_weights.pb' # # initialize # + from keras.models import load_model import tensorflow as tf import os import os.path as osp from keras import backend as K output_fld = input_fld + 'tensorflow_model/' if not os.path.isdir(output_fld): os.mkdir(output_fld) weight_file_path = osp.join(input_fld, weight_file) # - # # Load keras model and rename output # + net_model = load_model(weight_file_path) K.set_learning_phase(0) pred = [None]*num_output pred_node_names = [None]*num_output for i in range(num_output): pred_node_names[i] = prefix_output_node_names_of_final_network+str(i) pred[i] = tf.identity(net_model.output[i], name=pred_node_names[i]) print('output nodes names are: ', pred_node_names) # - # #### [optional] write graph definition in ascii # + sess = K.get_session() if write_graph_def_ascii_flag: f = 'only_the_graph_def.pb.ascii' tf.train.write_graph(sess.graph.as_graph_def(), output_fld, f, as_text=True) print('saved the graph definition in ascii format at: ', osp.join(output_fld, f)) # - # #### convert variables to constants and save from tensorflow.python.framework import graph_util from tensorflow.python.framework import graph_io constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names) graph_io.write_graph(constant_graph, output_fld, output_graph_name, as_text=False) print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))
.ipynb_checkpoints/keras_to_tensorflow-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + def hello(): print("hello") print("glad to see you") hello() print("we are going to agra") hello() # + def hello(): print("Hello") print("Glad to meet you") hello() print("It works") hello() hello() # - verbs = ["kayak", "cry", "walk", "eat", "drink", "fly"] ing=[] for i in verbs: i += 'ing' ing.append(i) print(ing) def hello(s,): print("hello ,I am ",s) print("Welcome") hello('kajal') hello("Singh") # + def hello(n,s): getting = "hello{}".format(n) print(getting*s) hello(" kitty " ,4) hello("",1) hello(" wei ", 2) # + def hello2(s): print("Hello "+s) print("Glad to meet you") hello2("Iman" + " and Jackie") hello2("Class " * 3) # - def hello(x): result = x*x return result y = 9 resulting = hello(y) print("The square of {} is the function is {}".format(y,resulting)) def hello(): print("hello") return 5 print("hyyyyyyy") return 10 x = hello() print(x) # + def longer_than_five(list_of_names): for name in list_of_names: # iterate over the list to look at each name if len(name) > 5: # as soon as you see a name longer than 5 letters, return True # then return True! # If Python executes that return statement, the function is over and the rest of the code will not run -- you already have your answer! return False # You will only get to this line if you # iterated over the whole list and did not get a name where # the if expression evaluated to True, so at this point, it's correct to return False! # Here are a couple sample calls to the function with different lists of names. Try running this code in Codelens a few times and make sure you understand exactly what is happening. 
list1 = ["Sam","Tera","Sal","Amita"] list2 = ["Rey","Ayo","Lauren","Natalie"] print(longer_than_five(list1)) print(longer_than_five(list2)) # + def square(x): y = x * x return y print(square(5) + square(5)) # - # 8. Write a function named same that takes a string as input, and simply returns that string. def same(x): return x same("hello") # 11. Write a function called change that takes one number as its input and returns that number, plus 7. def change(x): return x+7 change(1) # 10. Write a function called subtract_three that takes an integer or any number as input, and returns that number minus three. def subtract_three(x): return x - 3 subtract_three(5) # # + def mylen(seq): c = 0 # initialize count variable to 0 for _ in seq: c = c + 1 # increment the counter for each item in seq return c print(mylen("hello")) print(mylen([1, 2, 7])) # - # 1. Write a function named total that takes a list of integers as input, and returns the total value of all those integers added together. # # # + def total(tot): num = 0 for i in tot: num = num + i return num result = total([2,3,4]) print(result) # - # 2. Write a function called count that takes a list of numbers as input and returns a count of the number of elements in the list. # # # + def count(x): assum = 0 for i in x: assum = assum + 1 return assum result = count([1,2,3,4,56,7]) print(result) # - def accum(x): return x+x accum(10) # Write a function, accum, that takes a list of integers as input and returns the sum of those integers. 
# # def accum(lst): assum = 0 for i in lst: assum = assum + i return assum result = [1,2,3,4,5,6,7,8,9] accum(result) # + def square(x): y = x ** x return y def sum_of_square(x,y,z): a = square(x) b = square(y) c = square(z) return x+y+z a=2 b= 3 c = 4 result = sum_of_square(a,b,c) print(result) # + def square(x): y = x * x return y def sum_of_squares(x,y,z): a = square(x) b = square(y) c = square(z) return a+b+c a = -5 b = 2 c = 10 result = sum_of_squares(a,b,c) print(result) # + def most_common_letter(s): frequencies = count_freqs(s) return best_key(frequencies) def count_freqs(st): d = {} for c in st: if c not in d: d[c] = 0 d[c] = d[c] + 1 return d def best_key(dictionary): ks = dictionary.keys() best_key_so_far = list(ks)[0] # Have to turn ks into a real list before using [] to select an item for k in ks: if dictionary[k] > dictionary[best_key_so_far]: best_key_so_far = k return best_key_so_far print(most_common_letter("abbbbbbbbbbbccccddddd")) # - # Write two functions, one called addit and one called mult. addit takes one number as an input and adds 5. mult takes one number as an input, and multiplies that input by whatever is returned by addit, and then returns the result. 
# # # + def addit(x): y = x + 5 return y def mult(x): a = addit(x) return a*x z = 3 result = addit(z) print(result) # + def pow(b, p): y = b ** p return y def square(x): a = pow(x, 2) return a n = 5 result = square(n) print(result) # + def square(x): return x*x def g(y): return y + 3 def h(y): return square(y) + 3 print(h(2)) # + def square(x): return x*x def g(y): return y + 3 def h(y): return square(y) + 3 print(g(h(2))) # + def double(y): y = 2 * y def changeit(lst): lst[0] = "Michigan" lst[1] = "Wolverines" y = 5 double(y) print(y) mylst = ['our', 'students', 'are', 'awesome'] changeit(mylst) print(mylst) # + def double(n): n = 2 * n def changeit(lst): lst[0] = "Michigan" lst[1] = "Wolverines" y = 5 double(y) print(y) mylst = ['106', 'students', 'are', 'awesome'] changeit(mylst) print(mylst) # + def double(n): global y y = 2 * n y = 5 double(y) print(y) # + def double(n): return 2 * n y = 5 y = double(y) print(y) # + def changeit(lst): lst[0] = "Michigan" lst[1] = "Wolverines" return lst mylst = ['106', 'students', 'are', 'awesome'] newlst = changeit(list(mylst)) print(mylst) print(newlst) # - # Write a function called change that takes any string, adds “Nice to meet you!” to the end of the argument given, and returns that new string. # # # + def change(v): return v+'Nice to meet you!' v=input("Enter the string: ") change(v) # - # Write a function, length, that takes in a list as the input. If the length of the list is greater than or equal to 5, return “Longer than 5”. If the length is less than 5, return “Less than 5”. # # def length(lst): if len(lst)>=5: return "Longer than 5" else: return "Less than 5" lst = [1,2,3,5,6] ls1 = [1,2,3] print(length(lst)) print(length(ls1)) # You will need to write two functions for this problem. The first function, divide that takes in any number and returns that same number divided by 2. The second function called sum should take any number, divide it by 2, and add 6. It should return this new number. 
You should call the divide function within the sum function. Do not worry about decimals. # # def divide(x): return x/2 def sum(x): return x/2 + 6 y = 10 sum(divide(y)) julia = ("Julia", "Roberts", 1967, "Duplicity", 2009, "Actress", "Atlanta,Georgia") # or equivalently julia = "Julia", "Roberts", 1967, "Duplicity", 2009, "Actress", "Atlanta, Georgia" print(julia[6]) # + #Create a tuple called practice that has four elements: ‘y’, ‘h’, ‘z’, and ‘x’. practice = ('y','h','z','x') print(practice) #3. Create a tuple named tup1 that has three elements: ‘a’, ‘b’, and ‘c’. tup1 = ("a","b","c") print(tup1) # - # 4. Provided is a list of tuples. Create another list called t_check that contains the third element of every tuple. # # # + lst_tups = [('Articuno', 'Moltres', 'Zaptos'), ('Beedrill', 'Metapod', 'Charizard', 'Venasaur', 'Squirtle'), ('Oddish', 'Poliwag', 'Diglett', 'Bellsprout'), ('Ponyta', "Farfetch'd", "Tauros", 'Dragonite'), ('Hoothoot', 'Chikorita', 'Lanturn', 'Flaaffy', 'Unown', 'Teddiursa', 'Phanpy'), ('Loudred', 'Volbeat', 'Wailord', 'Seviper', 'Sealeo')] t_check = [] for i in lst_tups: t_check.append(i[2]) # - # 5. Below, we have provided a list of tuples. Write a for loop that saves the second element of each tuple into a list called seconds. 
# # # + tups = [('a', 'b', 'c'), (8, 7, 6, 5), ('blue', 'green', 'yellow', 'orange', 'red'), (5.6, 9.99, 2.5, 8.2), ('squirrel', 'chipmunk')] seconds = [] for i in tups: seconds.append(i[1]) # - authors = [('Paul', 'Resnick'), ('Brad', 'Miller'), ('Lauren', 'Murphy')] for first_name, last_name in authors: print("first name:", first_name, "last name:", last_name) fruits = ['apple', 'pear', 'apricot', 'cherry', 'peach'] for n in range(len(fruits)): print(n, fruits[n]) fruits = ['apple', 'pear', 'apricot', 'cherry', 'peach'] for item in enumerate(fruits): print(item[0], item[1]) # With only one line of code, assign the variables water, fire, electric, and grass to the values “Squirtle”, “Charmander”, “Pikachu”, and “Bulbasaur” # # water , fire , electric , grass = "Squirtle" , "Charmander","Pikachu","Bulbasaur" # With only one line of code, assign four variables, v1, v2, v3, and v4, to the following four values: 1, 2, 3, 4. # # v1 ,v2 ,v3 ,v4 = 1,2,3,4 # The .items() method produces a sequence of key-value pair tuples. With this in mind, write code to create a list of keys from the dictionary track_medal_counts and assign the list to the variable name track_events. Do NOT use the .keys() method. # If you remember, the .items() dictionary method produces a sequence of tuples. Keeping this in mind, we have provided you a dictionary called pokemon. For every key value pair, append the key to the list p_names, and append the value to the list p_number. Do not use the .keys() or .values() methods. 
# +
pokemon = {'Rattata': 19, 'Machop': 66, 'Seel': 86, 'Volbeat': 86, 'Solrock': 126}

# Fix: iterating a dict yields keys only (strings), and dicts have no
# .append(); the original crashed with an AttributeError.  As the exercise
# asks, unpack each (key, value) pair from .items() into two lists.
p_names = []
p_number = []
for first, second in pokemon.items():
    p_names.append(first)
    p_number.append(second)
print(p_names)
print(p_number)


# +
def add(x, y):
    """Return the sum of x and y."""
    return x + y

print(add(3, 4))

z = (5, 4)
# A tuple is a single positional argument, so add(z) raises TypeError.
# Wrapped in try/except so every cell still evaluates (see notebook intro
# convention) while the demonstrated error is shown.
try:
    print(add(z))
except TypeError as err:
    print(err)

# +
print(add(3, 4))

print(add(*z))  # unpacking the tuple supplies both arguments
# -

# With only one line of code, assign the variables city, country, and year
# to the values of the tuple olymp.

# +
olymp = ('Rio', 'Brazil', 2016)

city, country, year = olymp
# -

# Define a function called info with five parameters: name, gender, age,
# bday_month, and hometown. The function should then return a tuple with all
# five parameters in that order.

def info(name, gender, age, bday_month, hometown):
    """Return the five pieces of personal data as a tuple, in order."""
    return (name, gender, age, bday_month, hometown)

info("kajal singh", "female", 19, 4, "Bihar")

# Given is the dictionary, gold, which shows the country and the number of
# gold medals they have earned so far in the 2016 Olympics. Create a list,
# num_medals, that contains only the number of medals for each country. You
# must use the .items() method. Note: The .items() method provides a list of
# tuples. Do not use .keys() method.

# +
gold = {'USA':31, 'Great Britain':19, 'China':19, 'Germany':13, 'Russia':12,
        'Japan':10, 'France':8, 'Italy':8}

num_medals = []
for i in gold.items():
    num_medals.append(i[1])
# -
Function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from matplotlib import pyplot as plt from scipy.fft import fft data = np.genfromtxt('BXBanguiJuly20.dat', delimiter='\t') print (data) print(len(data)) print(type(data)) a=fft(data) print(a) b=np.abs(a) print(b) plt.plot(np.log(b)) from scipy import signal import matplotlib.pyplot as plt t = np.linspace(-1, 1, 200, endpoint=False) #sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) sig=data widths = np.arange(1, len(data)) cwtmatr = signal.cwt(sig, signal.ricker, widths) print(cwtmatr) plt.imshow(cwtmatr, extent=[-1, 1440, 1, len(data)], cmap='PRGn', aspect='auto', vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) plt.show()
wavelet_transform.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Build the training CSV

# +
import glob
import os

import pandas as pd

# +
images = []
labels = []


def make_csv(label):
    """Append the image file names and class label for one class directory.

    label -- class directory name such as "c0"; the leading 'c' is stripped
    so the stored label is the digit only (c0 -> 0).  Results accumulate in
    the module-level `images` and `labels` lists.
    """
    path = "../state-farm-distracted-driver-detection/imgs/train/" + label + "/*.jpg"
    for f in glob.glob(path):
        # Fix: os.path.basename is portable; splitting on "\\" only worked
        # with Windows path separators.
        img = os.path.basename(f)
        images.append(img)
        labels.append(label[1:])  # c0 -> 0


# One class directory per label: c0 .. c9.
for idx in range(10):
    make_csv("c{}".format(idx))
# -

print(len(images))
print(len(labels))

train = pd.DataFrame(columns=["image", "label"])
train["image"] = images
train["label"] = labels

# Write the CSV
train.to_csv("../state-farm-distracted-driver-detection/train.csv", index=False)
notebook/Make_Train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AmitHasanShuvo/Machine-Learning-Projects/blob/master/Experiment_with_Filters_and_Pools.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="QZPnLVad1gF_" colab_type="code" colab={}
import cv2
import numpy as np
from scipy import misc

# Built-in 512x512 greyscale test image used throughout the notebook.
i = misc.ascent()

# + [markdown] id="kWcWrXuu1114" colab_type="text"
# Given Image
#

# + id="Dkm28VPG1mqt" colab_type="code" outputId="166ca9da-0d3e-4bde-fa15-003203c4ce8c" colab={"base_uri": "https://localhost:8080/", "height": 248}
import matplotlib.pyplot as plt
plt.grid(False)
plt.gray()
plt.axis('off')
plt.imshow(i)
plt.show()

# + id="NCIElhql1ojS" colab_type="code" colab={}
# Work on a copy so the original image stays available for comparison.
i_transformed = np.copy(i)
size_x = i_transformed.shape[0]
size_y = i_transformed.shape[1]

# + [markdown] id="9dvV0jLM1_LX" colab_type="text"
# Creating 3x3 filters

# + id="T_4335881qXA" colab_type="code" colab={}
# This kernel detects edges nicely
# It creates a convolution that only passes through sharp edges and straight
# lines.
# Experiment with different values for fun effects.
# NOTE: renamed `filter` -> `kernel` so the Python builtin filter() is not shadowed.
#kernel = [ [0, 1, 0], [1, -4, 1], [0, 1, 0]]
# A couple more kernels to try for fun!
kernel = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]]
#kernel = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
# If all the digits in the kernel don't add up to 0 or 1, you
# should probably do a weight to get it to do so
# so, for example, if your weights are 1,1,1 1,2,1 1,1,1
# They add up to 10, so you would set a weight of .1 if you want to normalize them
weight = 1

# + [markdown] id="8Ky5ixhp2GCL" colab_type="text"
# Creating a convolution

# + id="MLJngE2n1sYj" colab_type="code" colab={}
# Slide the 3x3 kernel over the interior of the image; the 1-pixel border is
# skipped so the window always fits. Results are clamped to the 0..255 range
# before being written back into the (integer) output image.
for x in range(1, size_x-1):
    for y in range(1, size_y-1):
        convolution = 0.0
        convolution = convolution + (i[x - 1, y-1] * kernel[0][0])
        convolution = convolution + (i[x, y-1] * kernel[0][1])
        convolution = convolution + (i[x + 1, y-1] * kernel[0][2])
        convolution = convolution + (i[x-1, y] * kernel[1][0])
        convolution = convolution + (i[x, y] * kernel[1][1])
        convolution = convolution + (i[x+1, y] * kernel[1][2])
        convolution = convolution + (i[x-1, y+1] * kernel[2][0])
        convolution = convolution + (i[x, y+1] * kernel[2][1])
        convolution = convolution + (i[x+1, y+1] * kernel[2][2])
        convolution = convolution * weight
        # Clamp to the valid 8-bit intensity range.
        if(convolution < 0):
            convolution = 0
        if(convolution > 255):
            convolution = 255
        i_transformed[x, y] = convolution

# + [markdown] id="bU0L_hbu2OjH" colab_type="text"
# effect of the convolution

# + id="UdcQEBOo1uru" colab_type="code" outputId="6fa9f671-b34a-4a4a-9d6f-38d5881cd1c7" colab={"base_uri": "https://localhost:8080/", "height": 269}
# Plot the image. Note the size of the axes -- they are 512 by 512
plt.gray()
plt.grid(False)
plt.imshow(i_transformed)
#plt.axis('off')
plt.show()

# + [markdown] id="HskTYj-W2T-5" colab_type="text"
# Pooling

# + id="mlUQVrAt1wSS" colab_type="code" outputId="79079b03-e7fd-4134-a825-0aa45b8bbfbd" colab={"base_uri": "https://localhost:8080/", "height": 269}
# 2x2 max pooling: keep the brightest pixel of each 2x2 block, halving each
# image dimension (512 -> 256). Assumes size_x and size_y are even, which
# holds for the 512x512 ascent image.
new_x = int(size_x/2)
new_y = int(size_y/2)
newImage = np.zeros((new_x, new_y))
for x in range(0, size_x, 2):
    for y in range(0, size_y, 2):
        pixels = []
        pixels.append(i_transformed[x, y])
        pixels.append(i_transformed[x+1, y])
        pixels.append(i_transformed[x, y+1])
        pixels.append(i_transformed[x+1, y+1])
        newImage[int(x/2), int(y/2)] = max(pixels)

# Plot the image. Note the size of the axes -- now 256 pixels instead of 512
plt.gray()
plt.grid(False)
plt.imshow(newImage)
#plt.axis('off')
plt.show()

# + [markdown] id="NpVEJqvI2W16" colab_type="text"
# References:
# 1. https://lodev.org/cgtutor/filtering.html (Pending)
# 2. https://github.com/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%206%20-%20Lesson%203%20-%20Notebook.ipynb

# + id="VcHxfrbu1yf6" colab_type="code" colab={}
Time Series Analysis/Experiment_with_Filters_and_Pools.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Introduction to reproducibility and power issues # + [markdown] slideshow={"slide_type": "fragment"} # ### Acknowledgements: # # This notebook borrows significantly from works by : # # * <NAME> # * <NAME> # # and from a previous presentation at the Despo lab. # # + [markdown] slideshow={"slide_type": "slide"} # ## Outline # + [markdown] slideshow={"slide_type": "fragment"} # 1. Significance, evidence and causes of non reproducibility # 2. A quick questionnaire # 3. Power issues: definition # 4. Button et al., 2013 # 5. Friston et al., 2012, and Yarkoni's answers # + slideshow={"slide_type": "skip"} from IPython.display import Image as Image # + slideshow={"slide_type": "slide"} print("<NAME>. (2013). Revised standards for statistical evidence. " + "PNAS 110, 19313–19317.") Image('./figures/johnson_PNAS_2013_significance.png') # + slideshow={"slide_type": "slide"} print("\n The correspondence between P values and Bayes factors based on \ \n UMPBTs suggest that commonly used thresholds for statistical \ \n significance represent only moderate evidence against null \ \n hypotheses. 
\n") Image('./figures/johnson_PNAS_2013_pval_bayefct.png') # + [markdown] slideshow={"slide_type": "slide"} # ## Evidence and causes of non reproducibility # + [markdown] slideshow={"slide_type": "subslide"} # * In general # - Nature "Reducing our irreproducibility", 2013 # * In pre-oncology # - Begley and Ellis, 2011 # * In psychological science # - Reproducibility Project: Psychology osf.io/ezcuj/wiki/home/ # - some response to this (<NAME>, Harvard) # * In Genetics # - GWAS (Ioannidis 2007) # - Stein et al., 2012 # * In neuroimaging # - Raemaekers 2007, Thirion 2007, Carp 2013, etc # * In Imaging Genetics # - Molendijk 2012, Meir 2008 # + slideshow={"slide_type": "slide"} print("from Molendijk 2012") Image('./figures/molendijk_2012_f4.png') # + [markdown] slideshow={"slide_type": "slide"} # * __Analyses are getting complex__ # * Data acquisition # * Linking with other data # * Data size # * Methods # # # * __Publication pressure is high__ # * I don't think I need to argue this here : Benefice / Risk in favor of quick publications # # # * __Mistakes are done__ # * "The scientific method’s central motivation is the ubiquity of error — the awareness that mistakes and self-delusion can creep in absolutely anywhere and that the scientist’s effort is primarily expended in recognizing and rooting out error." _Donoho, 2009._ # # * Anatomy of an error # # # * __Power issues__ # # + [markdown] slideshow={"slide_type": "slide"} # ## Causes of non reproducibility: Ioannidis 2005, Plos One # + [markdown] slideshow={"slide_type": "fragment"} # ### Corollary 1: The smaller the studies conducted in a scientific field, the less likely the research findings are to be true. # + [markdown] slideshow={"slide_type": "fragment"} # ### Corollary 2: The smaller the effect sizes in a scientific field, the less likely the research findings are to be true. 
# + [markdown] slideshow={"slide_type": "fragment"} # ### Corollary 3: The greater the number and the lesser the selection of tested relationships in a scientific field, the less likely the research findings are to be true. # + [markdown] slideshow={"slide_type": "fragment"} # ### Corollary 4: The greater the flexibility in designs, definitions, outcomes, and analytical modes in a scientific field, the less likely the research findings are to be true. # + [markdown] slideshow={"slide_type": "subslide"} # See: # <NAME>., <NAME>., and <NAME>. (2011). False-Positive Psychology: Undisclosed Flexibility in Data Collection and Analysis Allows Presenting Anything as Significant. Psychological Science 22, 1359–1366. # + slideshow={"slide_type": "fragment"} Image('./figures/simons_table.png') # + [markdown] slideshow={"slide_type": "slide"} # ### Corollary 5: The greater the financial and other interests and prejudices in a scientific field, the less likely the research findings are to be true # + [markdown] slideshow={"slide_type": "slide"} # ## The questionnaire of Westover 2011 for medical students # + [markdown] slideshow={"slide_type": "fragment"} # Consider a typical medical research study, for example designed to test the efficacy of a drug, in which a null hypothesis H0 (’no effect’) is tested against an alternative hypothesis H 1 (’some effect’). Suppose that the study results pass a test of statistical significance (that is P-value<0.05) in favor of H1. What has been shown? # # # <NAME>., <NAME>., and <NAME>. (2011). Significance testing as perverse probabilistic reasoning. BMC Medicine 9, 20. # # + [markdown] slideshow={"slide_type": "fragment"} # 1. H0 is false. # 2. H1 is true. # 3. H0 is probably false. # 4. H1 is probably true. # 5. Both (1) and (2). # 6. Both (3) and (4). # 7. None of the above. 
# + [markdown] slideshow={"slide_type": "fragment"} # ### Corollary 6: The hotter a scientific field (with more scientific teams involved), the less likely the research findings are to be true. # + [markdown] slideshow={"slide_type": "slide"} # ## Some Definitions # + [markdown] slideshow={"slide_type": "subslide"} # * $H_0$ : null hypothesis: The effect we are testing for is null # # * $H_A$ : alternative hypothesis : Not $H_0$ # # * $T_S$ : Test is significant (eg $T = T_S$) # # * $T_N$ : Test is not significant (eg $T = T_N$) # # * $\alpha$ : false positive rate - probability to reject $H_0$ when $H_0$ is true (therefore $H_A$ is false) # # * $\beta$ : false negative rate - probability to accept $H_0$ when $H_A$ is true ($H_0$ is false) # # + [markdown] slideshow={"slide_type": "subslide"} # power = $1-\beta$ # # where $\beta$ is the risk of *false negative* # # So, to compute power, *we need to know what is the risk of false negative*, ie, the risk to not show a significant effect while we have some signal (null is false). # + slideshow={"slide_type": "skip"} import matplotlib.pyplot as plt # %matplotlib inline import numpy as np import scipy.stats as sst # + slideshow={"slide_type": "skip"} from sympy import symbols, Eq, solve, simplify, lambdify, init_printing, latex init_printing(use_latex=True, order='old') # + slideshow={"slide_type": "skip"} from IPython.display import HTML # Code to make HTML for a probability table def association_table(assocs, title): """ parameters: ----------- assocs: dict keys are ... values are ... 
title: string The table title """ latexed = {'title': title} for key, value in assocs.items(): latexed[key] = latex(value) latexed['s_total'] = latex(assocs['t_s'] + assocs['f_s']) latexed['ns_total'] = latex(assocs['t_ns'] + assocs['f_ns']) return """<h2>{title}</h2> <TABLE> <TR><TH> $ H / T $ <TH> $T_S$ <TH>$T_N$ <TR><TH> $H_A$ <TD> ${t_s}$ <TD> ${t_ns}$ <TR><TH> $H_0$ <TD> ${f_s}$ <TD> ${f_ns}$ <TR><TH> Total <TD> ${s_total}$ <TD> ${ns_total}$ </TABLE>""".format(**latexed) # + slideshow={"slide_type": "slide"} from sympy.abc import alpha, beta # get alpha, beta symbolic variables assoc = dict(t_s = 1 - beta, # H_A true, test significant = true positives t_ns = beta, # true, not significant = false negatives f_s = alpha, # false, significant = false positives f_ns = 1 - alpha) # false, not sigificant = true negatives HTML(association_table(assoc, 'Not considering prior')) # + [markdown] slideshow={"slide_type": "slide"} # ## How do we compute power ? # + [markdown] slideshow={"slide_type": "fragment"} # ### What is the effect ? # # #### An example using the mean of two groups: # # $$\hspace{3cm}\mu = \mu_1 - \mu_2$$ # + [markdown] slideshow={"slide_type": "fragment"} # ### What is the standardized effect ? 
(eg Cohen's d) # # #### With $\sigma$ the standard deviation of the data (assume the two groups have same $\sigma$ # # $$\hspace{3cm}d = \frac{\mu_1 - \mu_2}{\sigma} = \frac{\mu}{\sigma}$$ # + [markdown] slideshow={"slide_type": "fragment"} # ### "Z" : Effect accounting for the sample size # # $$\hspace{3cm}Z = \frac{\mu}{\sigma / \sqrt{n}}$$ # + [markdown] slideshow={"slide_type": "slide"} # ### Cohen's d value: # + slideshow={"slide_type": "fragment"} # print some cohen values muse = np.asarray((.05, .1,.2,.3,.4,.5)) sigmas = np.linspace(1.,.5,len(muse)) cohenstr = ["For sigma = {0:3.2f} and m = {1:3.2f} Cohen d = {2:3.2f}".format(sig,mu,coh) for (sig, mu, coh) in zip(sigmas, muse, muse/sigmas)] for s in cohenstr: print(s) # + [markdown] slideshow={"slide_type": "fragment"} # We have to estimate the effect $\mu$, say under some normal noise. Our statistic will be: # # $$ # t = \frac{\hat{\mu}}{\hat{\sigma_{\mu}}} = \frac{\hat{\mu}}{\hat{{SE}_{\mu}}} # $$ # + [markdown] slideshow={"slide_type": "slide"} # Power is the probability that the observed t is greater than $t_{.05}$, computing $t_{.05}$ by assuming that we are under the null. # # So, we compute $t_{.05}$, and want to compute $P(t > t_{.05})$. # # To compute this, __we need the distribution of our measured t - therefore we need to know the signal / effect size !__ # # Let's assume we know this and call it $t_{nc}$, and $F_{nc}$ for the cumulative distribution (more on this in the appendix). # # $\mbox{Power} = 1 - \beta = P(t > t_{.05}) = 1 - F_{nc}(t_{.05})$ # # + [markdown] slideshow={"slide_type": "fragment"} # __This power will depend on 4 parameters :__ # # $$ \mbox{The non standardized effect : } \mu$$ # # $$\mbox{The standard deviation of the data : } \sigma$$ # # $$\mbox{The number of subjects : } n$$ # # $$\mbox{The type I risk of error : } \alpha$$ # # And on the distribution of the statistic under the alternative hypothesis. 
# Here, we assume our original data are normals, and the $t = \frac{\hat{\mu}}{\hat{{SE}_{\mu}}}$ statistics follows a non central t distribution with non centrality parameter
#
# $$\theta = \mu \sqrt{n}/\sigma$$
#
# and $n-1$ degrees of freedom.

# + slideshow={"slide_type": "skip"}
import scipy.stats as sst
import numpy as np
import matplotlib.pyplot as plt
from __future__ import division

# +
# plot power as a function of n : define a little function that
# takes n, mu, sigma, alpha, and report n.
# Optionally plot power as a function of nfrom matplotlib.patches import Polygon
from matplotlib.patches import Polygon

def stat_power(n=16, mu=1., sigma=1., alpha=0.05, plot=False, xlen=500):
    """Compute the statistical power of a one-sample t-test.

    The data are assumed normal with mean `mu` and standard deviation
    `sigma`; under the alternative the t statistic follows a non central
    t distribution.

    Parameters:
    -----------
    n: int,
        The number of sample in the experiment
    mu: float
        The mean of the alternative
    sigma: float
        The standard deviation of the alternative
    plot: bool
        If true, plot the H0 and H1 sampling densities and shade the
        rejection region under H1
    alpha: float
        The (type I) risk of error
    xlen: int
        Number of points for the display

    Returns:
    --------
    float
        The statistical power for this number of sample, mu, sigma, alpha
    """
    # Assume we need to estimate the mean - degrees of freedom is n-1
    df = n-1
    # The non centrality parameter is mu -
    # see https://en.wikipedia.org/wiki/Noncentral_t-distribution
    # NOTE(review): the slide above defines theta = mu*sqrt(n)/sigma, but the
    # code uses sqrt(df) = sqrt(n-1) -- confirm which convention is intended.
    theta = np.sqrt(df)*mu/sigma
    # let's use a non central t distribution: nct and create a nc random variable
    nctrv = sst.nct(df, theta)
    # what is our t-threshold ?
    t_alph_null = sst.t.isf(alpha, df)
    # Power = P(t > t_alpha | H1) = 1 - F_nc(t_alpha)
    spow = 1 - nctrv.cdf(t_alph_null)

    if plot:
        # define the domain of the plot: wide enough to cover both the null
        # (standard normal) and the non central t densities
        normrv = sst.norm(0, 1.)
        bornesnc = nctrv.isf([0.001, .999])
        bornesn = normrv.isf([0.001, .999])
        x = np.linspace(np.min(bornesn), np.max(bornesnc), xlen)
        t_line = np.zeros_like(x)
        # define the line : find x that is closest to t_alph_null
        x_t_line = np.argmin((x-t_alph_null)**2)
        # define y: the max of the two pdf
        y_t_line = np.max(np.hstack((nctrv.pdf(x), normrv.pdf(x))))
        t_line[x_t_line] = y_t_line
        fig, ax = plt.subplots()
        plt.plot(x, nctrv.pdf(x), 'g', x, normrv.pdf(x), 'b', x, t_line, 'r')
        # Make the shaded region
        # http://matplotlib.org/xkcd/examples/showcase/integral_demo.html
        a = x[x_t_line]; b = np.max(bornesnc);
        ix = np.linspace(a,b)
        iy = nctrv.pdf(ix)
        verts = [(a, 0)] + list(zip(ix, iy)) + [(b, 0)]
        poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
        ax.add_patch(poly)
        ax.set_xlabel("t-value - H1 centred on " + r"$\theta $" + " = {0:4.2f}; ".format(theta) + r"$\mu$" + " = {0:4.2f}".format(mu));
        ax.set_ylabel("Probability(t)");
        ax.set_title('H0 and H1 sampling densities ' + r'$\beta$' + '= {0:3.2f}; Sample size n = {1:d} '.format(spow,n))
        plt.show()

    return spow


# + slideshow={"slide_type": "slide"}
# Example: a medium effect (d = .5) with 30 subjects.
n = 30
mu = .5
sigma = 1.
pwr = stat_power(n, mu, sigma, plot=True, alpha=0.05, xlen=500)
print ("Power = ", pwr, " Z effect (Non centrality parameter) = ", mu*np.sqrt(n)/sigma)

# + slideshow={"slide_type": "slide"}
# Same effect with only 12 subjects: power drops.
n = 12
mu = .5
sigma = 1.
pwr = stat_power(n, mu, sigma, plot=True, alpha=0.05, xlen=500) print("Power = ", pwr, " Z effect (Non centrality parameter): ", mu*np.sqrt(n)/sigma) # + [markdown] slideshow={"slide_type": "slide"} # ### Plot power as a function of the number of subject in the study # + slideshow={"slide_type": "fragment"} def pwr_funcofsubj(muse, nses, alpha=.05, sigma=1): """ muse: array of mu nses: array of number of subjects alpha: float, type I risk sigma: float, data sigma """ mstr = [ 'd='+str(m) for m in np.asarray(muse)/sigma] lines=[] for mu in (muse): pw = [stat_power(n, mu, sigma, alpha=alpha, plot=False) for n in nses] (pl,) = plt.plot(nses, pw) lines.append(pl) plt.legend( lines, mstr, loc='upper right', shadow=True) plt.xlabel(" Number of subjects ") plt.ylabel(" Power "); return None mus = (.05, .1,.2,.3,.4,.5, .6); #nse = range(70, 770, 20) nse = range(7, 77, 2) alph = 1.e-3 pwr_funcofsubj(mus, nse, alph) # + [markdown] slideshow={"slide_type": "slide"} # ### **** Here - play with n **** # + slideshow={"slide_type": "fragment"} mus = (.05,.1,.2,.3,.4,.5,.6); nse = range(10, 330, 20) #nse = range(7, 77, 2) alph = 0.001 pwr_funcofsubj(mus, nse, alph) # + [markdown] slideshow={"slide_type": "slide"} # ### Here - play with $\alpha$ # + slideshow={"slide_type": "fragment"} mus = (.05, .1,.2,.3,.4,.5, .6); nse = range(10, 770, 20) #nse = range(7, 77, 2) alph = 0.05/30000 pwr_funcofsubj(mus, nse, alph) # + [markdown] slideshow={"slide_type": "slide"} # ### What is the effect size of APOE on the hippocampal volume ? # + [markdown] slideshow={"slide_type": "fragment"} # Authors find p value of 6.63e-10 # # They had 733 subjects # # # + slideshow={"slide_type": "fragment"} n01 = sst.norm(0,1.) 
z = n01.isf(6.6311e-10) d = n01.isf(6.6311e-10)/np.sqrt(733) print("z = %4.3f d = %4.3f " %(z,d)) # + [markdown] slideshow={"slide_type": "slide"} # ## The questionnaire of Westover 2011 for medical students # + [markdown] slideshow={"slide_type": "fragment"} # Consider a typical medical research study, for example designed to test the efficacy of a drug, in which a null hypothesis H0 (’no effect’) is tested against an alternative hypothesis H 1 (’some effect’). Suppose that the study results pass a test of statistical significance (that is P-value<0.05) in favor of H1. What has been shown? # # # <NAME>., <NAME>., and <NAME>. (2011). Significance testing as perverse probabilistic reasoning. BMC Medicine 9, 20. # # + [markdown] slideshow={"slide_type": "fragment"} # 1. H0 is false. # 2. H1 is true. # 3. H0 is probably false. # 4. H1 is probably true. # 5. Both (1) and (2). # 6. Both (3) and (4). # 7. None of the above. # + [markdown] slideshow={"slide_type": "slide"} # ## Derivation of Ionannidis / Button positive prediction value : PPV # + [markdown] slideshow={"slide_type": "slide"} # ### Recall : Marginalize and Baye theorem # + [markdown] slideshow={"slide_type": "fragment"} # $\newcommand{Frac}[2]{\frac{\displaystyle #1}{\displaystyle #2}}$ # # Some new terms: # # * $Pr(H_A)$ - prior probability of $H_A$ - probability of $H_A$ before the experiment was conducted. # * $Pr(H_0)$ - prior probability of $H_0$ = $1 - Pr(H_A)$ - probability of null hypothesis before experiment conducted # # We are interested in updating the probability of $H_A$ and $H_0$ as a result of a test on some collected data. This updated probability is $Pr(H_A | T)$ - the probability of $H_A$ given the test result $T$. $Pr(H_A | T)$ is called the *posterior* probability because it is the probability after the test result. 
# # Remembering that # # $$P(A) = \sum_{b_i} P(A,B=b_i) $$ # # To simplify the notation, we note $P(B=b)$ as $P(b)$ # # Remembering [Bayes theorem](http://en.wikipedia.org/wiki/Bayes'_theorem#Derivation): # # $$P(A, B) = P(A | B) P(B)$$ # # and therefore # # $$P(A | B) = \Frac{P(B, A)}{P(B)} = \Frac{P(B | A) P(A)}{P(B)}$$ # # # Putting these two together we have : # # # $$P(A) = \sum_{b_i} P(A|B=b_i) P(B=b_i)$$ # # Now, apply this to the probability of the test results $T$. The test takes a value either under $H_A$ or $H_0$. # The probability of a *signficant* result of the test $T=T_S$ is : # # $Pr(T=T_S) = P(T_S) = Pr(T_S | H_A) Pr(H_A) + Pr(T_S | H_0) Pr(H_0)$ # + [markdown] slideshow={"slide_type": "fragment"} # # What is the posterior probability of $H_A$ given that the test is signficant? # # $P(H_A | T_S) = \Frac{P(T_S | H_A) P(H_A)}{P(T_S)} = \Frac{P(T_S | H_A) P(H_A)}{P(T_S | H_A) Pr(H_A) + Pr(T_S | H_0) Pr(H_0)}$ # # We have $P(T_S | H_A)$, $P(T_S | H_0)$ from the first column of the table above. Substituting into the equation: # # $P(H_A | T_S) = \Frac{(1 - \beta) P(H_A)}{(1 - \beta) P(H_A) + \alpha P(H_0)}$ # + [markdown] slideshow={"slide_type": "slide"} # Defining: # # $\pi := Pr(H_A)$, hence: $1 - \pi = Pr(H_0)$ # # we have: # # $P(H_A | T_S) = \Frac{(1 - \beta) \pi}{(1 - \beta) \pi + \alpha (1 - \pi)}$ # # + slideshow={"slide_type": "fragment"} from sympy.abc import pi # get symbolic variable pi post_prob = (1 - beta) * pi / ((1 - beta) * pi + alpha * (1 - pi)) post_prob # + slideshow={"slide_type": "fragment"} assoc = dict(t_s = pi * (1 - beta), t_ns = pi * beta, f_s = (1 - pi) * alpha, f_ns = (1 - pi) * (1 - alpha)) HTML(association_table(assoc, r'Considering prior $\pi := P(H_A)$')) # + [markdown] slideshow={"slide_type": "slide"} # ## From Button et al. 
: three messages # + [markdown] slideshow={"slide_type": "fragment"} # ### message1 : studies of low power have low probability of detecting an effect # + [markdown] slideshow={"slide_type": "fragment"} # #### From the definition of power ! # + [markdown] slideshow={"slide_type": "slide"} # ### message2 : studies of low power have low positive predictive value : PPV = P(H1 True | detection) # + [markdown] slideshow={"slide_type": "fragment"} # Same as Ioannidis - do the derivation starting with odd ratios # # From Button et al., we have the positive predictive value PPV defined as : # # $$ # PPV = \frac{(1-\beta)R}{(1-\beta)R + \alpha},\textrm{ with } R = P(H_1)/P(H_0) = P_1/P_0 = \pi / (1-\pi) # $$ # # Hence, # # $$ # PPV = \frac{(1-\beta)P_1}{P_0}\frac{P_0}{(1-\beta)P_1 + \alpha P_0} # $$ # # $$ # = \frac{(1-\beta)P_1}{(1-\beta)P_1 + \alpha P_0} # $$ # # $$ # = P(H_1, T_S) / P(T_S) = P(H_1 | T_S) # $$ # + [markdown] slideshow={"slide_type": "fragment"} # If we have 4 chances over 5 that $H_0$ is true, and one over five that $H_1$ true, then R = 1/5 / 4/5 = .25. If there's 30% power we have PPV = 50%. So, 50% chance that our result is indeed true. 80% power leads to 80% chance of $H_1$ to be true knowing that we have detected an effect at the $\alpha$ risk of error. # + slideshow={"slide_type": "slide"} def PPV(R, Pw, alpha, verbose=True): ppv = (Pw * R)/(Pw*R + alph) if verbose: print("with odd ratio=%3.2f, " "power=%3.2f, alpha=%3.2f, " "we have PPV=%3.2f" %(R,Pw,alph,ppv)) return ppv # + slideshow={"slide_type": "fragment"} # example from Button et al: P1 = 1/5, P0 = 4/5. R = 1/4 R = 1./4. Pw = .8 alph = .05 ppv = PPV(R, Pw, alph) # + slideshow={"slide_type": "fragment"} # if power is small : R = 1./4. 
Pw = np.arange(.1,1,.2)
alph = .05
# PPV shrinks as power drops, for a fixed prior odds R and alpha.
for pw in Pw:
    PPV(R, pw, alph)

# + [markdown] slideshow={"slide_type": "slide"}
# ### message3 : studies of low power exaggerate the effect size of claimed effects

# + [markdown] slideshow={"slide_type": "fragment"}
# If the power is small, the studies that will find an effect significant will be estimating a stronger effect than the true one. Let's simulate the phenomenon.

# + slideshow={"slide_type": "fragment"}
n = 16 # number of subjects
sigma = 1.0
mu = .4
alpha = .05
pw = stat_power(n, mu, sigma, alpha=alpha, plot=True)
print("Power : ", pw)

# + slideshow={"slide_type": "slide"}
# Simulate Nexp experiments of n subjects each, and compare the true effect
# with the average effect estimated in the experiments that reach significance.
Nexp = 100
norv = sst.norm(0., sigma)
strv = sst.t(n-1)
# one-sided detection threshold under the null
t_05 = strv.isf(alpha)
# kept for reproducibility: this draw advances the RNG state exactly as the
# original notebook did (it only fed the commented-out print below)
sample = norv.rvs(size=(n,1)) + mu
#print np.std(sample)
tval = np.zeros((Nexp,))
effects = np.zeros((Nexp,))
# NOTE: the original also had `effectsize = np` (binding the numpy module!) and
# an unused `nb_detection = 0`; both were dead code and have been removed.
for e in range(Nexp):
    sample = norv.rvs(size=(n,1)) + mu
    # t statistic and standardized effect estimate for this experiment
    tval[e] = sample.mean()*np.sqrt(n)/np.std(sample, ddof=1)
    effects[e] = sample.mean()/np.std(sample, ddof=1)
# bias of the effect estimate conditional on detection, in percent of mu
relbias = 100*(effects[tval > t_05].mean() - mu)/mu
print("mean effect over experiments : {:.3f} and true : {:.3f}".format(effects.mean(), mu))
print("mean effect estimated when test is significant", effects[tval > t_05].mean())
print("relative bias: %3.2f " % relbias)
estimated_effect = effects[tval > t_05].mean()

# + slideshow={"slide_type": "slide"}
# Plot every experiment's estimated effect against the detection threshold,
# the true effect, and the mean detected effect.
x = range(Nexp)
t05_line = np.ones_like(x)*t_05
true_effect = np.ones_like(x)*mu
est_effect = np.ones_like(x)*estimated_effect
lines = plt.plot(x, effects, 'b-',
                 x, t05_line/np.sqrt(n), 'g-',
                 x, true_effect, 'r--',
                 x, est_effect, 'b.')
plt.legend( lines, ('t','t05_line', 'true effect', 'est. effect'),
            loc='upper right', shadow=True)
plt.xlabel(" experiments ")
plt.ylabel(" t-value ")

# + slideshow={"slide_type": "-"}
from IPython.display import Image as Image
# -

# ### Another way of seeing the same thing in Button et al, 2013:

# + slideshow={"slide_type": "fragment"}
Image('./figures/button_fig5.png')

# + [markdown] slideshow={"slide_type": "slide"}
# ### Replication should be done with more subjects, not the same amount.

# + [markdown] slideshow={"slide_type": "fragment"}
# A graph which I found hard to understand, because in c under the null the variance of the estimated mean should be reduced as well.

# + slideshow={"slide_type": "fragment"}
Image('./figures/button_fig1.png')

# + [markdown] slideshow={"slide_type": "slide"}
# ### Looking at the median power study of 49 Meta analysis : median power of studies in those

# + slideshow={"slide_type": "fragment"}
Image('./figures/button_fig2.png')

# + [markdown] slideshow={"slide_type": "slide"}
# ## <NAME> 16 subjects magic number - cf <NAME>'s answer

# + [markdown] slideshow={"slide_type": "fragment"}
# * Rule number four: the under-sampled study
# * Rule number five: the over-sampled study
# * Rule number eight: exploit superstitious thinking about voodoo correlations

# + [markdown] slideshow={"slide_type": "slide"}
# ### KF arguments can be summarized by:
#
# 1. The null is always false: no effects are ever truly zero. Collect enough data and you will always end up rejecting the null.
#
# 2. Large effects are more important than small ones. Very small effects are ‘trivial’.
#
# 3. Very large samples will invariably end up identifying many small uninteresting effects. We should sample few subjects to detect only the big effects.
#

# + slideshow={"slide_type": "fragment"}
# power of KF analysis : d=1,
n = 16; mu = .5; sigma = 1.; alpha = 0.05
stat_power(n, mu, sigma, alpha=alpha, plot=False)

# + [markdown] slideshow={"slide_type": "slide"}
# ### 1.
A less favorable case # + [markdown] slideshow={"slide_type": "fragment"} # #### We are doing many test and have to decrease $\alpha$ - # + slideshow={"slide_type": "fragment"} # analysis in a slightly less favorable case n = 16; mu = .5; sigma = 1.; alpha = 0.001 stat_power(n, mu, sigma, plot=True, alpha = alpha) # + [markdown] slideshow={"slide_type": "slide"} # ### In neuroimaging non corrected p-value are small, let's plot n as a function of alpha : # + slideshow={"slide_type": "-"} def n_power(pw=.8, mu=1., sigma=1., alpha=0.05): """ compute the number of subjects needed to get pw given mu, sigma and alpha """ norv = sst.norm(0,1.) ta = norv.ppf(1 - alpha) tb = norv.ppf(1 - pw) n = (sigma*(ta - tb)/mu)**2 return n # + slideshow={"slide_type": "fragment"} mu = .4; sigma = 1.; pw = .8 lnalph = np.arange(1.9,5.1,.1) nsub = [n_power(pw, mu, sigma, alpha=10**(-a)) for a in lnalph] plt.plot(lnalph, nsub) plt.xlabel(' exponent of the detection p-value alpha = 10^{-x} ') plt.ylabel(' number of subject required for power = %3.2f ' % pw) #xscale('log') # + [markdown] slideshow={"slide_type": "slide"} # ### 2. The uncertainty of small n results is very high. Confidence interval are just above zero. 
# + slideshow={"slide_type": "-"}
def confidence_intervals(Nexp, **prmtrs):
    """Simulate Nexp one-sample experiments and their confidence intervals.

    Nexp: the number of experiments done
    prmtrs: a dictionary with our parameters, example:
        prmtrs = {'n':16, 'mu':.3, 'sigma': 1., 'alpha': 0.05}

    returns arrays of size Nexp with:
    effect: the estimated effect
    detect: an array of 0 or 1, 1 when the effect is detected at alpha
    lCI: lower bound of confidence interval
    uCI: upper bound of confidence interval
    """
    # unpack parameters:
    n = prmtrs['n']
    mu = prmtrs['mu']; alpha = prmtrs['alpha']; sigma = prmtrs['sigma']
    df = n-1
    theta = mu*np.sqrt(n)/sigma
    # initialize arrays
    t = np.zeros((Nexp,))
    effect = np.zeros((Nexp,))
    lCI = np.zeros((Nexp,))
    uCI = np.zeros((Nexp,))
    # compute random variables and thresholds
    norv = sst.norm(0., sigma)
    strv = sst.t(df)
    # get the 0.05 t value *under the null* to construct confidence interval
    # NOTE(review): this is a one-sided 0.05 cut-off (so a 90% two-sided CI)
    # and stays fixed at 0.05 even when `alpha` differs -- confirm intended.
    t_05 = strv.isf(0.05)
    # get the alpha level t value *under the null* to detect
    t_alph = strv.isf(alpha)

    for experim in range(Nexp):
        # get n sample
        sample = norv.rvs(size=(n,)) + mu
        # effect and normalized effect size
        effect[experim] = sample.mean()
        std_error_data = np.std(sample, ddof=1)
        std_error_mean = std_error_data/np.sqrt(n) # np.std takes ddof as
                                                   # the df of freedom lost! here: 1.
        t[experim] = effect[experim]/std_error_mean
        # confidence interval :
        CI_05 = t_05*std_error_mean
        lCI[experim] = effect[experim] - CI_05 # t_alph #
        uCI[experim] = effect[experim] + CI_05 # t_alph #

    # number of detection:
    detect = t>t_alph
    # print 'number of detections:', xd.shape

    return (effect, detect, lCI, uCI)


# + slideshow={"slide_type": "fragment"}
#---------------------- parameters ------------------#
prmtrs = {'n':30, 'mu':.25, 'sigma': 1., 'alpha': 0.15}
theta = prmtrs['mu']*np.sqrt(prmtrs['n'])/prmtrs['sigma']
# theoretical power for these parameters (sigma defaults to 1. in stat_power)
Pw = stat_power(prmtrs['n'], prmtrs['mu'], alpha=prmtrs['alpha'])
print('mean t value should be around %3.2f \n' %theta)

#-------------- simulate Nexp experiments ---------#
Nexp = 10000
effect, detect, lCI, uCI = confidence_intervals(Nexp, **prmtrs)
# NOTE(review): the comparisons below use the global `mu` set in earlier cells,
# not prmtrs['mu'] -- they only agree when the two values coincide.
print("Compare power {:.3} and rate of detection {:.3} ".format(Pw, detect.sum()/Nexp))
print("Mean effect {:.3f} compared to average detected effect {:3f}".format(
        effect.mean(), effect[detect].mean()))
print("Lower bound > mu: {} lCI > mu {:3f} detected: {} over {}".format(
        (lCI[detect]>mu).sum(), (lCI[detect]>mu).sum()/detect.sum(), detect.sum(), Nexp ))
# -

lCI[detect].mean(), (lCI[detect]>mu).sum(), (uCI[detect]<mu).sum(), lCI[detect].shape, lCI.mean()

# + slideshow={"slide_type": "fragment"}
#-------------- plot ------------------------------#
x = np.arange(Nexp)
xd = np.arange(detect.sum())
mu_line = np.ones((Nexp,))*prmtrs['mu']
# print the number of lower confidence interval values that are above the true mean:
# this should be about the risk of error/2
print("lCI > mu : {:.3}, compare with {:.3} ".format(
        (lCI > mu).sum() / (1.*detect.sum()), prmtrs['alpha']))
# print(Nexp)
# there should be none of these:
# print "(lCI < 0 ", (lCI[detect] < 0).sum() / detect.sum()

# Detected experiments only: lower bound, estimated effect, upper bound and
# the true effect, one x position per detected experiment.
f = plt.figure(1).set_size_inches(12,4)
lines = plt.plot(xd, lCI[detect], 'g-',
                 xd, effect[detect], 'b--',
                 xd, uCI[detect], 'r-',
                 xd, mu_line[detect], 'k');
plt.legend( lines, ('lower_bound','detected Effect', 'Upper bound', 'True effect'),
            loc='upper right', shadow=True)
plt.xlabel(" One x is one experiment where detection occured", fontdict={'size':14})
plt.ylabel(" Effect value and confidence interval ", fontdict={'size':14})
plt.title("Detected effects and their confidence interval", fontdict={'size':16});

# + [markdown] slideshow={"slide_type": "slide"}
# ### 3. Our prior is that effect sizes are small

# + [markdown] slideshow={"slide_type": "fragment"}
# * big effects have been found : see Meyer et al (2001)
# * big effect sizes are less likely than small ones : the prior of big effects is low
# * large sample studies report lower effects - as well as intra subjects

# + [markdown] slideshow={"slide_type": "fragment"}
# Example of Desmond and Glover 2002: using a threshold of 0.01, working memory task, 12 subject:
#

# + slideshow={"slide_type": "fragment"}
Image('./figures/Desmond_Glover_effectsize.png')

# + [markdown] slideshow={"slide_type": "slide"}
# ## "Protected inference" :
#
# ### Protect against small effect if you want to by redefining the null (i.e., effect is within [-.1 .1])

# + [markdown] slideshow={"slide_type": "slide"}
# ## References

# + [markdown] slideshow={"slide_type": "-"}
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME> and <NAME>. Power failure: why small sample
# size undermines the reliability of neuroscience.
#
# <NAME>, 2012 [citation needed]» Blog Archive » Sixteen is not magic: Comment on Friston (2012)
#
# Desmond and Glover, Estimating sample size in functional MRI (fMRI) neuroimaging
# studies: Statistical power analyses, Journal of Neuroscience Methods 118 (2002) 115/128.
#
# <NAME>, 2012, Neuroimage. Ten ironic rules for non-statistical reviewers
#
# <NAME>, <NAME> and <NAME>. False-Positive Psychology: Undisclosed Flexibility in Data Collection and Analysis Allows Presenting Anything as Significant. Psychological Science. 2011.
#
# <NAME> & <NAME>. Drug development: Raise standards for preclinical cancer research. Nature, 2012.
# # M <NAME>, <NAME>, <NAME>, 2011, BMC Medecine, Significance testing as perverse probabilistic reasoning. # # <NAME>. (2013). Revised standards for statistical evidence. Proceedings of the National Academy of Sciences 110, 19313–19317. # # <NAME>., <NAME>., <NAME>., and <NAME>. (2015). The fickle P value generates irreproducible results. Nature Methods 12, 179–185. # # <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2012). Does parametric fMRI analysis with SPM yield valid results?—An empirical study of 1484 rest datasets. NeuroImage 61, 565–578. # # # <NAME>. The secret lives of experiments: methods reporting in the fMRI literature. Neuroimage 63, # 289–300 (2012). # # This article reviews methods reporting and methodological choices across 241 recent fMRI studies and shows that there were nearly as many unique analytical pipelines as there were studies. In addition, many studies were underpowered to detect plausible effects. # # <NAME>: # # "Publication bias is a problem in all fields of research. The results of a paper should actually receive zero weight in the evaluation of its quality, otherwise there is the motivation to cherry-pick the data that give the most impressive result. The measure of quality should be the way the results were obtained – size of sample, experimental procedure, endpoints used. Ideally the reviewers of a paper should not see its results at all, only the description of the experiment." # # # ### Related blog posts: # # http://daniellakens.blogspot.ca/2015/11/the-relation-between-p-values-and.html # # # -
.ipynb_checkpoints/NSS_power_issues-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 7.0 # language: sagemath # name: sagemath # --- # + %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns #import scipy.io.wavfile import scipy.ndimage as sp #import calendar import os from PIL import Image from IPython.display import Audio # + faces = [] for each in os.listdir('./Database/'): faces.append(sp.imread('./Database/' + each)) faces = np.array(faces) # - fig = plt.figure(figsize=(5,5)) plt.grid(False) plt.imshow(faces[20]); # + sad_profs_filenames = [name for name in os.listdir('./Database/') if '04' in name] sad_profs = [] for filename in sad_profs_filenames: sad_profs.append(sp.imread('./Database/' + filename)) ''' #Shows whole class's sad faces for i in sad_profs: fig = plt.figure(figsize=(1,1)) plt.grid(False) plt.imshow(i) ''' # - giraffe = sp.imread('./giraffejpg') fig = plt.figure(figsize=(4,4)) plt.grid(False) plt.imshow(giraffe) print giraffe[0][0] print giraffe.shape numpixels = giraffe.shape[0]*giraffe.shape[1] print numpixels print giraffe.min() print giraffe.max() # + def opengrey(filelocation): return Image.open(filelocation).convert('LA') def turngrey(img): return img.convert('LA') def printpicture(obj): fig = plt.figure(figsize=(4,4)) plt.grid(False) plt.imshow(obj) printpicture(opengrey('./giraffejpg')) # - def meshpictures(location1, location2): img1 = Image.open(location1) img2 = Image.open(location2) meshed_arrays = (np.array(img1)/2 + np.array(img2)/2) meshed_images = Image.fromarray(np.uint8(meshed_arrays)) return meshed_images # + def tolocation(name1, name2): filelocation1 = './Database/faceimage_'+name1+'.png' filelocation2 = './Database/faceimage_'+name2+'.png' return meshpictures(filelocation1, filelocation2) printpicture(tolocation('taylorSheneman_00', 'dannyWolf_00')) # -
Day4/qea_day4_inclass.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="KRvZhCG_sFo6" # !pip install allennlp # + colab={} colab_type="code" id="6m0nFBV6O9O8" import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchtext import data from torchtext import datasets import random import numpy as np import time from allennlp.modules.attention import (CosineAttention, LinearAttention, BilinearAttention, AdditiveAttention, DotProductAttention) from allennlp.nn.util import weighted_sum SEED = 1234 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.backends.cudnn.deterministic = True TEXT = data.Field(tokenize = 'spacy', lower = True) LABEL = data.LabelField() # + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" id="vrFxxb_wO9PA" outputId="22deeee6-43c0-4fa4-b468-2e93dedaebdd" train_data, valid_data, test_data = datasets.SNLI.splits(TEXT, LABEL) # + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" id="TFOHD_f7O9PD" outputId="d67308fb-f5d1-4b9b-c6c4-56ce68483f3c" print(f"Number of training examples: {len(train_data)}") print(f"Number of validation examples: {len(valid_data)}") print(f"Number of testing examples: {len(test_data)}") # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="klEFghE-O9PG" outputId="3d52b472-9033-4fdb-d78f-d8f3958ee377" print(vars(train_data.examples[0])) # + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="fKU1zvs1O9PK" outputId="9862f170-c0f6-4147-c15d-dfd421d91f6d" MIN_FREQ = 2 TEXT.build_vocab(train_data, min_freq = MIN_FREQ, vectors = "glove.6B.300d", unk_init = torch.Tensor.normal_) LABEL.build_vocab(train_data) # + colab={"base_uri": "https://localhost:8080/", "height": 34} 
colab_type="code" id="pako8gvpO9PM" outputId="91b24d91-6ede-435f-cbfc-7d7ff0c9f66f" print(f"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}") # + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="rwSdrU2jO9PP" outputId="6ae05de9-4e84-40f8-b9c2-df70447d734b" print(TEXT.vocab.freqs.most_common(20)) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="KMneB8JIO9PS" outputId="18db5dfc-83f7-4ff1-93e9-dfd8ccb10946" print(TEXT.vocab.itos[:10]) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="E656jc54O9PU" outputId="e0a06f70-6caf-40b7-ae16-3be0ff54defa" print(LABEL.vocab.itos) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="6PSDjR0RO9PW" outputId="aa10ee7a-9791-47cc-93fc-792b8f5aa730" print(LABEL.vocab.freqs.most_common()) # + colab={} colab_type="code" id="VSxIFDzCO9PZ" BATCH_SIZE = 512 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits( (train_data, valid_data, test_data), batch_size = BATCH_SIZE, device = device) # + colab={} colab_type="code" id="Te24ffT4PLG7" # We do not use this implementation # This function takes the output and hidden tensors # of an lstm, or gru. 
We will use allennlp attentions instead
# Reference implementation of dot-product attention over the RNN outputs,
# kept for comparison; the notebook uses allennlp's attention modules below.
def attention(out, hidden):
    # out: [sent len, batch size, hid dim]; hidden: [1, batch size, hid dim]
    out = out.permute(1,0,2)
    hidden = hidden.squeeze(0)
    # Score every timestep against the final hidden state (batched dot product).
    attn_weights = torch.einsum('pqr,pr->pq', [out, hidden])
    soft_attn_weights = F.softmax(attn_weights, 1)
    # Attention-weighted sum of the outputs -> one vector per batch element.
    new_hid = torch.einsum('pqr,pq->pr', [out, soft_attn_weights])
    return new_hid

# + colab={} colab_type="code" id="a2ss7xoVt5qI"
# Cosine attention between the final hidden state and each output timestep;
# True -> normalize the resulting attention weights.
cosineAttention = CosineAttention(True)

# + colab={} colab_type="code" id="zSVZiv_bt_Zh"

# + colab={} colab_type="code" id="prJ__ntIO9QH"
class NLIRNN(nn.Module):
    # Premise/hypothesis encoder for SNLI: shared embedding + shared RNN with
    # attention, followed by a stack of fully connected layers over the
    # concatenated sentence encodings.
    def __init__(self, vocab_size, encode_method, embedding_dim, hidden_dim, fc_layers, output_dim, dropout, pad_idx):
        super().__init__()
        assert encode_method in {'gru', 'lstm'}
        self.encode_method = encode_method
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)
        # Projects embeddings to the RNN's hidden size.
        self.translation = nn.Linear(embedding_dim, hidden_dim)
        if encode_method == 'gru':
            self.rnn = nn.GRU(hidden_dim, hidden_dim)
        elif encode_method == 'lstm':
            self.rnn = nn.LSTM(hidden_dim, hidden_dim)
        # hidden_dim * 2 because premise and hypothesis encodings are concatenated.
        fcs = [nn.Linear(hidden_dim * 2, hidden_dim * 2) for _ in range(fc_layers)]
        self.fcs = nn.ModuleList(fcs)
        self.fc_out = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, prem, hypo):
        #prem = [prem sent len, batch size]
        #hypo = [hypo sent len, batch size]
        embedded_prem = self.embedding(prem)
        embedded_hypo = self.embedding(hypo)
        #embedded_prem = [prem sent len, batch size, embedding dim]
        #embedded_hypo = [hypo sent len, batch size, embedding dim]
        translated_prem = F.relu(self.translation(embedded_prem))
        translated_hypo = F.relu(self.translation(embedded_hypo))
        #translated_prem = [prem sent len, batch size, hidden dim]
        #translated_hypo = [hypo sent len, batch size, hidden dim]
        if self.encode_method == 'gru':
            outputs_prem, hidden_prem = self.rnn(translated_prem)
            # instead of the following four lines we could use this:
            # hidden_prem = attention(outputs_prem, hidden_prem)
            outputs_prem = outputs_prem.permute(1,0,2)
            hidden_prem =
hidden_prem.squeeze(0) hidden_prem_att = cosineAttention(hidden_prem, outputs_prem) hidden_prem = weighted_sum(hidden_prem, hidden_prem_att) outputs_hypo, hidden_hypo = self.rnn(translated_hypo) outputs_hypo = outputs_hypo.permute(1,0,2) hidden_hypo = hidden_hypo.squeeze(0) hidden_hypo_att = cosineAttention(hidden_hypo, outputs_hypo) hidden_hypo = weighted_sum(hidden_hypo, hidden_hypo_att) #or: hidden_hypo = attention(outputs_hypo, hidden_hypo) #outputs_x = [sent len, batch size, hid dim] #hidden_x = [1, batch size, hid dim] hidden_prem = hidden_prem.squeeze(0) hidden_hypo = hidden_hypo.squeeze(0) #hidden_x = [batch size, hid dim] else: outputs_prem, (hidden_prem, cell_prem) = self.rnn(translated_prem) outputs_prem = outputs_prem.permute(1,0,2) hidden_prem = hidden_prem.squeeze(0) hidden_prem_att = cosineAttention(hidden_prem, outputs_prem) hidden_prem = weighted_sum(hidden_prem, hidden_prem_att) #or: hidden_prem = attention(outputs_prem, hidden_prem) outputs_hypo, (hidden_hypo, cell_hypo) = self.rnn(translated_hypo) outputs_hypo = outputs_hypo.permute(1,0,2) hidden_hypo = hidden_hypo.squeeze(0) hidden_hypo_att = cosineAttention(hidden_hypo, outputs_hypo) hidden_hypo = weighted_sum(hidden_prem, hidden_hypo_att) #outputs_x = [sent len, batch size, hid dim] #hidden_x = [1, batch size, hid dim] #cell_x = [1, batch size, hid dim] hidden_prem = hidden_prem.squeeze(0) hidden_hypo = hidden_hypo.squeeze(0) #hidden_x = [batch size, hid dim] hidden = torch.cat((hidden_prem, hidden_hypo), dim=1) #hidden = [batch size, hid dim * 2] for fc in self.fcs: hidden = fc(hidden) hidden = F.relu(hidden) hidden = self.dropout(hidden) prediction = self.fc_out(hidden) #prediction = [batch size, output dim] return prediction # + colab={} colab_type="code" id="HEka-0PHO9Pe" INPUT_DIM = len(TEXT.vocab) EMBEDDING_DIM = 300 HIDDEN_DIM = 300 FC_LAYERS = 3 OUTPUT_DIM = len(LABEL.vocab) DROPOUT = 0.25 PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token] model = NLIRNN(INPUT_DIM, 'lstm', EMBEDDING_DIM, 
HIDDEN_DIM, FC_LAYERS, OUTPUT_DIM, DROPOUT, PAD_IDX) # + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" id="jgE5RSU5O9Pg" outputId="8ac3bb0e-0851-4e14-ba3a-9b6b7516dd1c" def init_weights(m): for name, param in m.named_parameters(): nn.init.normal_(param.data, mean=0, std=0.1) model.apply(init_weights) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1GrdgTxwO9Pi" outputId="139ce1f7-bd0d-4871-f2aa-57730e4b6106" def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="p6T_-3D0O9Pl" outputId="15e91ab2-0a6f-4204-8ebd-1c4b7d1963fc" pretrained_embeddings = TEXT.vocab.vectors print(pretrained_embeddings.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 134} colab_type="code" id="Ja1pRBi4O9Po" outputId="a4487511-37ef-4dfb-8e24-4d41c919e6f8" model.embedding.weight.data.copy_(pretrained_embeddings) # + colab={"base_uri": "https://localhost:8080/", "height": 134} colab_type="code" id="mdYeUrJUO9Pq" outputId="fca4e617-ecaa-48ae-9a05-53254c01fe44" UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token] model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM) model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM) print(model.embedding.weight.data) # + colab={} colab_type="code" id="KDhldluJO9Ps" model.embedding.weight.requires_grad = False # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UboB1KQhO9Pu" outputId="60e01d04-ae59-4972-d6f7-dcc39fd921bc" def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') # + colab={} colab_type="code" id="6t9DWlqAO9Py" optimizer = optim.Adam(model.parameters()) # + colab={} colab_type="code" id="1CA29FlcO9P1" 
criterion = nn.CrossEntropyLoss()

# + colab={} colab_type="code" id="dVmMYAaBO9P4"
model = model.to(device)
criterion = criterion.to(device)

# + colab={} colab_type="code" id="bLrymJA7O9P6"
def categorical_accuracy(preds, y):
    """Batch accuracy: the fraction of predictions matching y
    (e.g. 8/10 right -> 0.8, NOT 8)."""
    predicted_classes = preds.argmax(dim = 1, keepdim = True)  # index of the highest logit
    hits = predicted_classes.squeeze(1).eq(y)
    return hits.sum() / torch.FloatTensor([y.shape[0]])

# + colab={} colab_type="code" id="4Gk_7ZvaO9P8"
def train(model, iterator, optimizer, criterion):
    """Run one optimization pass over `iterator`; return (mean loss, mean accuracy)."""
    running_loss = 0
    running_acc = 0
    model.train()
    for batch in iterator:
        #batch.premise / batch.hypothesis = [sent len, batch size]
        #batch.label = [batch size]
        prem, hypo, labels = batch.premise, batch.hypothesis, batch.label
        optimizer.zero_grad()
        predictions = model(prem, hypo)
        #predictions = [batch size, output dim]
        loss = criterion(predictions, labels)
        acc = categorical_accuracy(predictions, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_acc += acc.item()
    return running_loss / len(iterator), running_acc / len(iterator)

# + colab={} colab_type="code" id="RbLeorN4O9P_"
def evaluate(model, iterator, criterion):
    """Score the model on `iterator` without gradient tracking;
    return (mean loss, mean accuracy)."""
    running_loss = 0
    running_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.premise, batch.hypothesis)
            loss = criterion(predictions, batch.label)
            acc = categorical_accuracy(predictions, batch.label)
            running_loss += loss.item()
            running_acc += acc.item()
    return running_loss / len(iterator), running_acc / len(iterator)

# + colab={} colab_type="code" id="22p-fxxcO9QB"
def epoch_time(start_time, end_time):
    """Split a wall-clock interval into whole minutes and leftover seconds."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds

# + colab={} colab_type="code" id="WvYFMZQhyGAA"
N_EPOCHS = 10
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Checkpoint whenever the validation loss improves.
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut1-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')

# + colab={} colab_type="code" id="rIeT-zttO9QF"
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')

# + colab={} colab_type="code" id="608bEpnvXq5q"
#Test Loss: 0.697 | Test Acc: 71.72%
1c Simple NLI Model with Attention on RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Ejercicio: Clasificación binaria # # # En este ejercicio vas a trabajar con un clasificador binario. Para ello utilizarás el dataset utilizado en el ejercicio 1, "usuarios_win_mac_lin.csv", que es un dataset en el que nos encontraremos datos de navegación de usuarios dentro de una web (páginas, duración...) y tendremos que predecir desde qué sistema operativo viene el usuario. # # Para que sea un problema binario, dado que hay 3 clases, nos vamos a quitar una de ellas. **Elimina del dataframe todas las filas que pertenezcan a la clase "1"** para quedarnos solo con 2 clases diferentes. # # - Compáralo con lo que obteníamos en el ejercicio 1: ¿Se mejora la precisión del algoritmo con dos clases vs regresión lineal, es decir, acertamos más con 3 o con 2 clases? ¿Por qué? # # - Grafica la curva ROC del algoritmo. # # *Nota: LogisticRegression() es una clase que tiene varios parámetros de entrada. Investiga (toca, modifica, prueba) las variables y comenta si hay alguna de ellas que, modificándola, mejore el porcentaje de acierto del problema.* # # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html # # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, confusion_matrix # - df = pd.read_csv("data/usuarios_win_mac_lin.csv")
Bloque 3 - Machine Learning/01_Supervisado/2-Logistic Regression/ejercicios/02_Binary_logistic_class.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: scope # language: python # name: scope # --- # This notebooks downloads genesets as text files. import ndex2 import requests import json from multiprocessing import Pool import pandas as pd import zipfile import io # # Getting Wikipathways r = requests.get("http://public.ndexbio.org/v2/networkset/453c1c63-5c10-11e9-9f06-0ac135e8bacf") tmp = json.loads(r.content.decode("utf-8")) wikipathway_networks = tmp['networks'] def get_nodes(uuid): n = ndex2.create_nice_cx_from_server("http://public.ndexbio.org", uuid=uuid) dG = n.to_networkx() genes = [attrs['name'] for node, attrs in dG.node.items() \ if (attrs['Type'] == 'GeneProduct') and (attrs['__gpml:XrefDatasource'] in ['Ensembl', 'Entrez Gene'])] name_array = dG.name.split(' - ') name = name_array[1].strip() + f" ({name_array[0].strip()})" return (name, genes) # + # n = ndex2.create_nice_cx_from_server("http://public.ndexbio.org", uuid="cbcce7eb-7785-11e9-848d-0ac135e8bacf") # dG = n.to_networkx() # + pool = Pool(16) result = pool.map(get_nodes, wikipathway_networks) wikipathway_nodes = dict(result) wikipathway_nodes = { k:[i.strip().replace('\n', '') for i in v] for k,v in wikipathway_nodes.items() \ if len(v) >= 2 } # - with open("genesets/wikipathways.txt", "w") as f: for name, genelist in wikipathway_nodes.items(): line = f"{name};{','.join(genelist)}\n" f.write(line) # # Getting Cancer Hallmarks from msigdb hallmarks='''HALLMARK_ADIPOGENESIS HALLMARK_ALLOGRAFT_REJECTION HALLMARK_ANDROGEN_RESPONSE HALLMARK_ANGIOGENESIS HALLMARK_APICAL_JUNCTION HALLMARK_APICAL_SURFACE HALLMARK_APOPTOSIS HALLMARK_BILE_ACID_METABOLISM HALLMARK_CHOLESTEROL_HOMEOSTASIS HALLMARK_COAGULATION HALLMARK_COMPLEMENT HALLMARK_DNA_REPAIR HALLMARK_E2F_TARGETS HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION HALLMARK_ESTROGEN_RESPONSE_EARLY HALLMARK_ESTROGEN_RESPONSE_LATE 
HALLMARK_FATTY_ACID_METABOLISM HALLMARK_G2M_CHECKPOINT HALLMARK_GLYCOLYSIS HALLMARK_HEDGEHOG_SIGNALING HALLMARK_HEME_METABOLISM HALLMARK_HYPOXIA HALLMARK_IL2_STAT5_SIGNALING HALLMARK_IL6_JAK_STAT3_SIGNALING HALLMARK_INFLAMMATORY_RESPONSE HALLMARK_INTERFERON_ALPHA_RESPONSE HALLMARK_INTERFERON_GAMMA_RESPONSE HALLMARK_KRAS_SIGNALING_DN HALLMARK_KRAS_SIGNALING_UP HALLMARK_MITOTIC_SPINDLE HALLMARK_MTORC1_SIGNALING HALLMARK_MYC_TARGETS_V1 HALLMARK_MYC_TARGETS_V2 HALLMARK_MYOGENESIS HALLMARK_NOTCH_SIGNALING HALLMARK_OXIDATIVE_PHOSPHORYLATION HALLMARK_P53_PATHWAY HALLMARK_PANCREAS_BETA_CELLS HALLMARK_PEROXISOME HALLMARK_PI3K_AKT_MTOR_SIGNALING HALLMARK_PROTEIN_SECRETION HALLMARK_REACTIVE_OXIGEN_SPECIES_PATHWAY HALLMARK_SPERMATOGENESIS HALLMARK_TGF_BETA_SIGNALING HALLMARK_TNFA_SIGNALING_VIA_NFKB HALLMARK_UNFOLDED_PROTEIN_RESPONSE HALLMARK_UV_RESPONSE_DN HALLMARK_UV_RESPONSE_UP HALLMARK_WNT_BETA_CATENIN_SIGNALING HALLMARK_XENOBIOTIC_METABOLISM''' def get_cancer_hallmarks(name): address = lambda x:f"http://software.broadinstitute.org/gsea/msigdb/download_geneset.jsp?geneSetName={x}&fileType=txt" response =(requests .get(address(name)) .content .decode('utf-8') .split('\n')) name = response[0].replace('_', ' ').title() geneset = response[2:] return (name, geneset) pool = Pool(16) out = pool.map(get_cancer_hallmarks, hallmarks.split('\n')) msigdb_hallmarks = dict(out) with open("genesets/msigdb_hallmarks.txt", "w") as f: for name, geneset in msigdb_hallmarks.items(): line = f"{name};{','.join(geneset)}\n" f.write(line) # # Getting CORUM complexes # + r = requests.get("https://mips.helmholtz-muenchen.de/corum/download/allComplexes.txt.zip", stream=True) z = zipfile.ZipFile(io.BytesIO(r.content)) corum = pd.read_csv(z.open("allComplexes.txt"), sep='\t', index_col='ComplexID') corum_map = corum[['ComplexName', 'subunits(Gene name)']].set_index('ComplexName').to_dict()['subunits(Gene name)'] corum_map = {k: v.split(';') for k,v in corum_map.items()} # - with 
open("genesets/corum.txt", "w") as f: for name, geneset in corum_map.items(): line = f"{name};{','.join(geneset)}\n" f.write(line) # # Combining geneset def parse_genesets(file): with open(file) as f: geneset = {} for line in f.readlines(): line = line.strip() if line: arr = line.split(';') genes = arr[1].split(',') geneset[arr[0]] = genes return geneset msigdb = parse_genesets('genesets/msigdb_hallmarks.txt') wikipathways = parse_genesets('genesets/wikipathways.txt') corum = parse_genesets('genesets/corum.txt') msigdb.update(corum) msigdb.update(wikipathways) with open("genesets/combined_genesets.txt", "w") as f: for k, v in msigdb.items(): line = f"{k};{','.join(v)}\n" f.write(line) # # Parsing DO # + # Parse Disease Ontology .obo file # Expect that the ID and name follows this pattern # [Term] on line n # id: on line n + 1 # name: on line n + 2 # If this expectation is not True, a ValueError will be raised r = requests.get("https://raw.githubusercontent.com/DiseaseOntology/HumanDiseaseOntology/master/src/ontology/HumanDO.obo") found = False found_line = None doid = None name = None header = [] line_count = 0 results = [] for line in r.content.decode('utf-8').split('\n'): line = line.strip() line_count += 1 if line_count <= 3: header.append(line) if line == '[Term]': found = True found_line = line_count elif found and line[:3] == 'id:': doid = line.split('id:')[1].strip() if line_count - found_line != 1: raise ValueError(f"Expected line {found_line + 1}, but line {line_count} found.") elif found and line[:5] == "name:": name = line.split("name:")[1].strip() if line_count - found_line != 2: raise ValueError(f"Expected line {found_line + 2}, but line {line_count} found.") results.append((doid, name)) found = False found_line = None doid = None name = None doid_name_mapping = dict(results) # - with open("doid/doid_name_mappings.txt", "w") as f: for line in header: line = f"#{line}\n" f.write(line) for i,j in doid_name_mapping.items(): line = f"{i}%{j}\n" 
f.write(line)
data/getting_genesets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: weldx real
#     language: python
#     name: weldx
# ---

import abc

# %load_ext autoreload
# %autoreload 2

# +
import inspect
from unittest.mock import MagicMock

from weldx_widgets.widget_base import WidgetBase as base


def all_subclasses(cls):
    """Return every (transitive) subclass of *cls* as a set."""
    return set(cls.__subclasses__()).union(
        [s for c in cls.__subclasses__() for s in all_subclasses(c)])


# + pycharm={"name": "#%%\n"}
# Smoke test: instantiate every widget subclass, mocking each required
# constructor argument so that __init__ can run.
all_widgets = []
for cls in all_subclasses(base):
    args = inspect.signature(cls.__init__)
    _mock_args = dict()
    for k, v in args.parameters.items():
        if k == "self":
            continue
        # BUG FIX: the original condition read `v.POSITIONAL_OR_KEYWORD`,
        # which looks up the enum constant on the Parameter instance and is
        # therefore always truthy — it even mocked *args parameters, which
        # then arrived as a bogus keyword argument. Check the parameter's
        # `kind` instead, skipping *args/**kwargs, and mock only parameters
        # with no default value.
        if (k != "kwargs"
                and v.kind not in (inspect.Parameter.VAR_POSITIONAL,
                                   inspect.Parameter.VAR_KEYWORD)
                and v.default is inspect.Parameter.empty):
            _mock_args[k] = MagicMock()
            print("mocked ", k, v)
    try:
        all_widgets.append(cls(**_mock_args))
    except TypeError as te:
        # Constructor still needs something we could not mock; skip this widget.
        print(cls, te)
        continue

# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# display all
for obj in all_widgets:
    obj.display()
demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Ddip User Guide # ### `%%dip` and `%autodip` --- Controlling Where the Code Are Executed # # `%%dip` is a cell magic that says run the entire cell in either local, or remote DDP process group, or both namespaces. # # `%autodip on" ` prepends `%%dip` to all subsequent cells, thus saving the repeated typing of `%%dip` in a stretch of cells all destined to be run in the DDP multiple processes. # # **Combining `%%dip` and `%autodip` offers a cell-by-cell control of execution destination, thus interactively switching between,say exploring/fixing up raw data in local notebook, and creating training dataset, and training on the DDP group.** # %load_ext Ddip # %makedip -g all --verbose False # %%dip? # + active="" # %%dip [-q] [-S SEE [SEE ...]] [{remote,local,everywhere}] # # %%dip - Parallel execution on cluster, allows transient output be displayed. # # positional arguments: # {remote,local,everywhere} # Where to run the cell, default is remote. # # optional arguments: # -q, --quiet Display any stdout only after task is finished, skip # all the transient, real-time output. # -S <SEE [SEE ...]>, --see <SEE [SEE ...]> # display outputs from process specified by a list of # ranks, or 'all'. Default to 0. # - # **By Default, `%%dip` alone implies `%%dip remote` execution, and to do so, it requires a DDP group be already created via the `%makedip` above** # # To see outputs from all the processs, use `--see all`, otherwise, only Rank-0 output is displayed. # # The following is run **only in all three DDP processes**, but not in the local notebook. # # Try adding `-q` flag to see the difference in behavior --- it's only useful if user really does not need to see the transient output (e.g. progress bar when training?) 
# %%dip --see all import time, random, os for i in (range(3)): print(f"{i} ", end='') time.sleep(1) print() x = random.uniform(1,10) print(f"Process [{os.getpid()}]: a uniform random number: {x}") # back in local notebook, i doesn't exist import os print(f"Back in local process [{os.getpid()}], x is....") x # **To run a cell in both local and DDP processes (local first, then DDP) use `%%dip everywere`.** # + # %%dip everywhere import os, random, time # The libraries are imported to all namespaces: local notebook and DDP process group. # - # As a second example, ***can you tell which output is the local notebook process?*** # + # %%dip everywhere --see all import os, random y = random.uniform(1,10) print(f"Process [{os.getpid()}]: a uniform random number: {y}") y # - # ### `%autodip` **To run a series of cells under the same `%%dip` magic** # # %autodip? # + active="" # %autodip [-a ARGS] [{on,off}] # # Prepend %%dip to subsequent cells so that they will run on the distributed data parallel cluster. # # positional arguments: # {on,off} Turn on auto-%dip for the cells after this one. # # optional arguments: # -a ARGS, --args ARGS In '-one -quoted string', flags and arguments to pass to %dip. # - # %autodip on # Next cell onward, will run in remote DDP process group. By default only rank-0 process output is displayed. dream = int(random.uniform(1,4)) print(f"Process [{os.getpid()}] dreaming {dream} seconds....") while dream > 0: print(".", end='', flush=True) time.sleep(1) dream -= 1 print(f"\nProcess [{os.getpid()}] woke up...") # **The default behavior of only streaming back output from rank 0 process** is useful when during distributed data parallel training, where rank 0 process aggregates and performs the all-reduce operation. Often times only rank-0's output is of interest. # # To see all outputs, for only one cell, use `%%dip --see all`, it'll be applied only to this cell. Afterwards, back to whatever %autodip on settings it was before. 
# %%dip --see all print(f"I am process [{os.getpid()}]") # Back to only display rank-0 output print(f"I am process [{os.getpid()}]") # #### **If `%%dip --see all` is desired for a series of cells, use the -a "args" flag to `%autodip`:** # %autodip -a "--see all" on print(f"I am process [{os.getpid()}], we are in automatic parallel execution mode.") print(f"The time now is {time.time()}") # #### While `%autodip` is `on`, a cell can ignore `%autodip` and change course to wherever, IF the first line begins with `%`. # # **Say in the middle of `%autodip on`, let us do something in the local notebook, and push the result to DDP:** # + # %%dip local s = f"Local process [{os.getpid()}] time was {time.time()}" # %dipush s # - # *Last cell overrides `%autodip`, now back to the last `%autodip -a "--see all" on` setting:* print(f"Process [{os.getpid()}] : {time.time()} vs {s}") # Type `%autodip` alone in a cell shows its current on or off status # %autodip # ### In typical DDP training, `%autodip on` is sufficient, as we don't usually care about the output of non-Rank 0 processes. # # #### To return to the local notebook namespaces for subsequent cells, turn off `%autodip` # %autodip off
notebooks/usage_%%dip_%autodip.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# **Importing the requirements**

# +
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# -

# + [markdown]
# **Adding the data**

# +
# Each row: a date, a binary stock-movement Label, and 25 news-headline columns.
url = 'https://raw.githubusercontent.com/AlbusDracoSam/NLP/main/Stock%20price%20NLP/Data.csv'
df = pd.read_csv(url, encoding="ISO-8859-1")
# -

# + [markdown]
# **Selecting the data**

# +
# Chronological split: everything before 2015 trains, the rest tests.
train = df[df['Date'] < '20150101']
test = df[df['Date'] > '20141231']
# -


def make_headlines(frame):
    """Return one cleaned paragraph of headlines per row of *frame*.

    Columns 2:27 hold the 25 headline columns. Non-letters are replaced
    with spaces and the text is lower-cased so punctuation and case do
    not leak into the bag-of-words vocabulary.
    """
    data = frame.iloc[:, 2:27].copy()  # copy() so we never mutate a view of *frame*
    data.replace('[^a-zA-Z]', ' ', regex=True, inplace=True)
    data.columns = [str(i) for i in range(25)]
    for col in data.columns:
        data[col] = data[col].str.lower()
    # Join the 25 headlines of each row into a single "paragraph".
    return [' '.join(str(cell) for cell in data.iloc[row, 0:25])
            for row in range(len(data.index))]


# + [markdown]
# **Feature engineering: clean, lower-case, and join each row into a paragraph**

# +
headlines = make_headlines(train)
print(headlines[0])
# -

# + [markdown]
# **Implementing BOW**

# +
countvector = CountVectorizer(ngram_range=(2, 2))  # bigram bag-of-words
traindataset = countvector.fit_transform(headlines)
# -

# + [markdown]
# **Implementing Random forest classifier**

# +
randomclassifier = RandomForestClassifier(n_estimators=200, criterion='entropy')
randomclassifier.fit(traindataset, train['Label'])
# -

# + [markdown]
# **Predicting for Test data**

# +
# Bug fix: the original joined the RAW test headlines (no regex cleaning,
# no lower-casing), so test n-grams did not match the fitted vocabulary.
# Apply the exact same pipeline used for the training rows.
test_transform = make_headlines(test)
test_dataset = countvector.transform(test_transform)
predictions = randomclassifier.predict(test_dataset)
# -

# + [markdown]
# **Accuracy**

# +
matrix = confusion_matrix(test['Label'], predictions)
print(matrix)
score = accuracy_score(test['Label'], predictions)
print(score)
report = classification_report(test['Label'], predictions)
print(report)
Stock Price Prediction/Stock price NLP.ipynb
(* -*- coding: utf-8 -*- (* --- *) (* jupyter: *) (* jupytext: *) (* text_representation: *) (* extension: .ml *) (* format_name: light *) (* format_version: '1.5' *) (* jupytext_version: 1.14.4 *) (* kernelspec: *) (* display_name: OCaml *) (* language: ocaml *) (* name: iocaml *) (* --- *) (* + [markdown] deletable=true editable=true (* <h1> <NAME> </h1> *) (* *) (* <h2>Énoncé</h2> *) (* *) (* Une entreprise possède un parc important de machines identiques et fonctionnant de façon indépendante. *) (* Elle souhaite étudier les pannes de ces machines de façon à établir un plan de maintenance. *) (* *) (* <h3>Simulation des temps de bon fonctionnement</h3> *) (* *) (* *) (* On choisit une machine au hasard. On suppose que les arrivées des pannes sont indépendantes les *) (* unes des autres et que la machine est réparée instantanément. On a relevé les résultats suivants. *) (* *) (* <table> *) (* <tr> *) (* <td>Temps de bon fonctionnement</td> *) (* <td>temps de bon fonctionnement cumulé</td> *) (* *) (* </tr> *) (* <tr> *) (* <td>0,008</td> *) (* <td>0,008</td> *) (* </tr> *) (* <tr> *) (* <td>1,17</td> *) (* <td>1,25</td> *) (* </tr> *) (* <tr> *) (* <td>0,60</td> *) (* <td>1,85</td> *) (* </tr> *) (* <tr> *) (* <td>1,54</td> *) (* <td>3,34</td> *) (* *) (* </tr> *) (* </table> *) (* + [markdown] deletable=true editable=true (* <img src="FIGURE.svg" /> *) (* + [markdown] deletable=true editable=true (* Le temps $t$ de bon fonctionnement est donné par la loi $t=-\frac{\ln(r)}{2.5}$ où r est une variable aléatoire sur l'intervalle $[0,1]$. Effectuer 100 000 simulations, trouver le nombre de pannes sur une durée de 2 unités. On tracera la probabilité en fonction du nombre de pannes. *) (* *) (* <h2>Loi de Poisson</h2> *) (* *) (* On dit que $X$ suit une loi de Poisson de paramètre $\lambda$ si la probabilité qu'il existe exactement $k$ occurrences ( $k$ étant un entier naturel, $k$ = 0, 1, 2,...) est donnée par: $P(X = k)= \frac{\lambda ^k}{k!}e^{-\lambda}$. 
The mean number of occurrences is then $\lambda$. *)
(* *)
(* Compute the mean number of failures; deduce $\lambda$; compare the values
   obtained by simulation with the theoretical values of the Poisson law. *)
(* *)
(* *)
(* <h3> Using the model </h3> *)
(* *)
(* Assuming X follows the Poisson law above, determine: *)
(* <ul><li>the probability that a machine fails at least three times (i.e. 3, 4, 5 or more) over a *)
(* span of 2 units after being put into service.</ul></li> *)
(* <ul><li>the maximum number of failures of a machine (over a span of 2 units after being put into service) *)
(* with a probability of at least 95%.</ul></li> *)
(* + [markdown] deletable=true editable=true
(* <h1> Solution </h1> *)
(* + deletable=true editable=true
open Random;;
Random.self_init;;
#use "topfind";;
#require "plplot";;
open Plplot;;
module P = Plot;;

(* Palette: index 0 is the background, indices 1-15 are pen colours
   as [index; red; green; blue] quadruplets (0-255 each). *)
let couleurs_list = [[ 0;255;255;255]; (*`white*)
                     [ 1;  0;  0;  0]; (*`black*)
                     [ 2;  0;  0;255]; (*`blue*)
                     [ 3;255;  0;  0]; (*`red*)
                     [ 4;165; 42; 42]; (*`brown*)
                     [ 5;  0;  0;  0];
                     [ 6;  0;  0;  0];
                     [ 7;  0;  0;  0];
                     [ 8;  0;  0;  0];
                     [ 9;  0;  0;  0];
                     [10;200;200;200]; (*`gray*)
                     [11;  0;255;255]; (*`light_blue*)
                     [12;  0;255;  0]; (*`green*)
                     [13;255;255;  0]; (*`yellow*)
                     [14;255;  0;255]; (*`pink*)
                     [15;160;  0;213]; (*`purple*)
                    ]

(* Register every quadruplet of the palette with plplot's colour map 0. *)
let rec loop couleurs_list = match couleurs_list with
  | [n;r;g;b]::tl -> plscol0 n r g b; loop tl
  | _ -> ();;

(* Pre-plot hook: white background, then install the palette above. *)
let couleurs = (fun () -> plscolbg 255 255 255; loop couleurs_list)

(* Open an SVG plot stream named <filename>.svg with the given axis bounds. *)
let initialisation filename xmin xmax ymin ymax =
  P.init (xmin, ymin) (xmax, ymax) `greedy (`svg `core)
    ~filename:(filename^".svg") ~pre:couleurs

(* Axis-label helpers (offsets are in character units outside the frame). *)
let xlabel texte = P.text_outside `black (`bottom 0.5) 3. texte
let ylabel texte = P.text_outside `black (`left 0.5) 5. texte
let label texte_x texte_y titre = P.label texte_x texte_y titre
(* + deletable=true editable=true
(* [range start stop step] builds the integer list start, start+step, ...
   stopping before [stop]; works for positive and negative steps. *)
let range start stop step =
  let rec range i acc =
    if step>0 && i>=stop then List.rev acc
    else if step<0 && i<=stop then List.rev acc
    else range (i+step) (i::acc)
  in range start [];;
(* + deletable=true editable=true
(* Exercise stub: the simulation that fills liste_k / compteurs /
   liste_poisson is left for the student ("à compléter"), then the
   simulated and theoretical distributions are plotted together.
   NOTE(review): liste_k, compteurs and liste_poisson are free names here —
   they must be produced by the completed simulation code. *)
let nombre_pannes () =
  "à compléter"

"à compléter"

let xs = Array.of_list (List.map float_of_int liste_k) in
let ys = compteurs in
let ys'= Array.of_list liste_poisson in
let p = initialisation "graph" 0. 20. 0. 0.18 in
P.plot ~stream:p [P.lines `blue xs ys;
                  P.lines `red xs ys';
                  xlabel "nombre de pannes";
                  ylabel "probabilité";
                  P.legend [[P.line_legend "simulation" `blue];
                            [P.line_legend "Poisson" `red]]];
P.finish ~stream:p ();;
(* + [markdown] deletable=true editable=true
(* <img src="graph.svg" /> *)
(* + deletable=true editable=true
Loi_Poisson/Loi_Poisson_OCaml_sujet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: datenguide
#     language: python
#     name: datenguide
# ---

# # Dashboard to explore data with ipywidget controls
#
# ### Set up requirements, import packages

# +
# %load_ext autoreload
# %autoreload
import os

# Notebooks are started one level below the repository root; hop up if so.
if os.path.basename(os.getcwd()) != "datenguide-python":
    os.chdir("..")

from datenguidepy.query_helper import get_regions, get_statistics
from datenguidepy import Query

import pandas as pd
import ipywidgets as widgets
from ipywidgets import interact, interactive

# %matplotlib inline

pd.set_option('display.max_colwidth', 150)
# -

# ### Create dashboard
#
# Using ipywidgets you can browse data types here. Opening the list of regions
# will take a few seconds. If the result remains empty, the selected region has
# no data for the selected statistic.
#
# First select the NUTS region level; the region selector is then populated
# with the regions available at that level.
#
# #### Choosing region and statistic

# Sorted (label, value) pairs feeding the statistic dropdown.
all_statistics = get_statistics().reset_index().loc[:, ["short_description", "statistic"]]
all_statistics = sorted(tuple(row) for row in all_statistics.to_numpy())


# +
def nuts_selector(nuts_level):
    """Build a region dropdown for *nuts_level* and show query results."""
    level_frame = get_regions().query(f"level == {nuts_level}").reset_index()[["name", "region_id"]]
    regions = sorted(tuple(row) for row in level_frame.to_numpy())

    @interact
    def get_statistics_dashboard(region=regions, statistic=all_statistics):
        q = Query.region(region)
        try:
            field = q.add_field(statistic)
        except KeyError as e:
            # Not every statistic exists for every region.
            return f"Statistic not available: {e}"
        return q.results()


interact(nuts_selector, nuts_level=[('NUTS 1', "'nuts1'"), ('NUTS 2', "'nuts2'"), ('NUTS 3', "'nuts3'")]);
use_case/use_case_ipywidgets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Federated PyTorch UNET Tutorial

# Install dependencies if not already installed

# !pip install torch

# First of all we need to set up our OpenFL workspace. To do this, simply run
# the `fx.init()` command as follows:

# +
import openfl.native as fx

# Setup default workspace, logging, etc. Install additional requirements
fx.init('torch_unet_kvasir')
# -

# +
# Import installed modules
import PIL
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from skimage import io
from torchvision import transforms as tsf
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from hashlib import sha384
from os import path
from os import listdir

from openfl.federated import FederatedModel, FederatedDataSet
from openfl.utilities import TensorKey
# -

# Download Kvasir dataset

# !wget 'https://datasets.simula.no/hyper-kvasir/hyper-kvasir-segmented-images.zip' -O kvasir.zip

ZIP_SHA384 = 'e30d18a772c6520476e55b610a4db457237f151e'\
    '19182849d54b49ae24699881c1e18e0961f77642be900450ef8b22e7'
# Verify the archive before extracting. Bug fix: the original called
# open() without ever closing the handle (and bounded the read by
# path.getsize, which is just "the whole file" anyway).
with open('./kvasir.zip', 'rb') as archive:
    assert sha384(archive.read()).hexdigest() == ZIP_SHA384, \
        'kvasir.zip checksum mismatch - re-download the archive'

# !unzip -n kvasir.zip -d ./data

# Now we are ready to define our dataset and model to perform federated
# learning on.

DATA_PATH = './data/segmented-images/'


# +
def read_data(image_path, mask_path):
    """Read an RGB image and its segmentation mask from disk.

    Returns (image, mask) where mask is the first channel of the mask
    file as uint8.
    """
    img = io.imread(image_path)
    assert img.shape[2] == 3, 'expected an RGB image'
    mask = io.imread(mask_path)
    return (img, mask[:, :, 0].astype(np.uint8))


class KvasirDataset(Dataset):
    """Kvasir dataset contains 1000 images for all collaborators.

    Each collaborator sees every collaborator_count-th image (strided
    split), and the last eighth of its shard is held out for validation.

    Args:
        data_path: path to dataset on disk
        collaborator_count: total number of collaborators
        collaborator_num: number of current collaborator
        is_validation: validation option
    """

    def __init__(self, data_path, collaborator_count, collaborator_num,
                 is_validation):
        self.images_path = './data/segmented-images/images/'
        self.masks_path = './data/segmented-images/masks/'
        # Keep only *.jpg files (name must end in 'jpg' and be non-trivial).
        self.images_names = [
            img_name
            for img_name in sorted(listdir(self.images_path))
            if len(img_name) > 3 and img_name[-3:] == 'jpg'
        ]
        # Strided shard: this collaborator takes every Nth image.
        self.images_names = self.images_names[collaborator_num::collaborator_count]
        self.is_validation = is_validation
        assert len(self.images_names) > 8, 'shard too small to split off validation'
        validation_size = len(self.images_names) // 8
        if is_validation:
            self.images_names = self.images_names[-validation_size:]
        else:
            self.images_names = self.images_names[:-validation_size]

        # Images: resize, scale to [0,1], then normalize to roughly [-1,1].
        self.img_trans = tsf.Compose([
            tsf.ToPILImage(),
            tsf.Resize((332, 332)),
            tsf.ToTensor(),
            tsf.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
        # Masks: nearest-neighbour resize so labels are not interpolated.
        self.mask_trans = tsf.Compose([
            tsf.ToPILImage(),
            tsf.Resize((332, 332), interpolation=PIL.Image.NEAREST),
            tsf.ToTensor()])

    def __getitem__(self, index):
        name = self.images_names[index]
        img, mask = read_data(self.images_path + name, self.masks_path + name)
        img = self.img_trans(img).numpy()
        mask = self.mask_trans(mask).numpy()
        return img, mask

    def __len__(self):
        return len(self.images_names)
# -

# Here we redefine `FederatedDataSet` methods, if we don't want to use default
# batch generator from `FederatedDataSet`.
# +
class KvasirFederatedDataset(FederatedDataSet):
    """FederatedDataSet backed by per-collaborator KvasirDataset shards."""

    def __init__(self, collaborator_count=1, collaborator_num=0, batch_size=1,
                 **kwargs):
        """Instantiate the data object.

        Args:
            collaborator_count: total number of collaborators
            collaborator_num: number of current collaborator
            batch_size: the batch size of the data loader
            **kwargs: additional arguments, passed to super init
        """
        super().__init__([], [], [], [], batch_size, num_classes=2, **kwargs)

        self.collaborator_num = int(collaborator_num)

        self.batch_size = batch_size

        # One strided shard for training, one for validation (see KvasirDataset).
        self.training_set = KvasirDataset(
            DATA_PATH, collaborator_count, collaborator_num, is_validation=False
        )
        self.valid_set = KvasirDataset(
            DATA_PATH, collaborator_count, collaborator_num, is_validation=True
        )

        self.train_loader = self.get_train_loader()
        self.val_loader = self.get_valid_loader()

    def get_valid_loader(self, num_batches=None):
        # num_workers=8 is hard-coded; NOTE(review): may oversubscribe small hosts.
        return DataLoader(self.valid_set, num_workers=8, batch_size=self.batch_size)

    def get_train_loader(self, num_batches=None):
        return DataLoader(
            self.training_set, num_workers=8, batch_size=self.batch_size, shuffle=True
        )

    def get_train_data_size(self):
        # Number of training samples in this collaborator's shard.
        return len(self.training_set)

    def get_valid_data_size(self):
        # Number of validation samples in this collaborator's shard.
        return len(self.valid_set)

    def get_feature_shape(self):
        # Shape of a single input image (taken from the first validation sample).
        return self.valid_set[0][0].shape

    def split(self, collaborator_count, shuffle=True, equally=True):
        # One federated dataset per collaborator. NOTE(review): the
        # `shuffle` and `equally` arguments are accepted for API
        # compatibility but ignored - sharding is always strided.
        return [
            KvasirFederatedDataset(collaborator_count, collaborator_num, self.batch_size)
            for collaborator_num in range(collaborator_count)
        ]
# -

# Our Unet model


# +
def soft_dice_loss(output, target):
    """Differentiable (soft) Dice loss, averaged over the batch.

    +1 in numerator and denominator smooths the ratio for empty masks.
    """
    num = target.size(0)
    m1 = output.view(num, -1)
    m2 = target.view(num, -1)
    intersection = m1 * m2
    score = 2.0 * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
    score = 1 - score.sum() / num
    return score


def soft_dice_coef(output, target):
    """Soft Dice coefficient summed (not averaged) over the batch."""
    num = target.size(0)
    m1 = output.view(num, -1)
    m2 = target.view(num, -1)
    intersection = m1 * m2
    score = 2.0 * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
    return score.sum()


class DoubleConv(nn.Module):
    """Two (Conv3x3 -> BatchNorm -> ReLU) stages at a fixed resolution."""

    def __init__(self, in_ch, out_ch):
        super(DoubleConv, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        x = self.conv(x)
        return x


class Down(nn.Module):
    """Encoder step: 2x2 max-pool then DoubleConv."""

    def __init__(self, in_ch, out_ch):
        super(Down, self).__init__()
        self.mpconv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_ch, out_ch)
        )

    def forward(self, x):
        x = self.mpconv(x)
        return x


class Up(nn.Module):
    """Decoder step: upsample x1, pad to match skip tensor x2, concat, conv."""

    def __init__(self, in_ch, out_ch, bilinear=False):
        super(Up, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        if bilinear:
            self.Up = nn.Upsample(
                scale_factor=2, mode="bilinear", align_corners=True
            )
        else:
            self.Up = nn.ConvTranspose2d(in_ch, in_ch // 2, 2, stride=2)
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x1, x2):
        x1 = self.Up(x1)
        # Pad x1 so its spatial size matches the skip connection x2
        # (odd input sizes make the two differ by a pixel).
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2))
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x


class UNet(nn.Module):
    """Standard 4-level UNet with a sigmoid head for binary segmentation."""

    def __init__(self, n_channels=3, n_classes=1):
        super().__init__()
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024)
        self.up1 = Up(1024, 512)
        self.up2 = Up(512, 256)
        self.up3 = Up(256, 128)
        self.up4 = Up(128, 64)
        self.outc = nn.Conv2d(64, n_classes, 1)

    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.outc(x)
        # Sigmoid so outputs are per-pixel probabilities in (0, 1).
        x = torch.sigmoid(x)
        return x

    def validate(
            self, col_name, round_num, input_tensor_dict, use_tqdm=False, **kwargs
    ):
        """Validate.

        Redefine function from PyTorchTaskRunner, to use our validation.
        Returns ({TensorKey: dice_coef}, {}) for the aggregator.
        NOTE(review): kwargs["apply"] is accessed unconditionally below, so
        callers must always pass apply= - verify against the task runner.
        """
        self.rebuild_model(round_num, input_tensor_dict, validation=True)
        self.eval()
        self.to(self.device)
        val_score = 0
        total_samples = 0

        loader = self.data_loader.get_valid_loader()
        if use_tqdm:
            # NOTE(review): tqdm is never imported in this notebook, so
            # use_tqdm=True would raise NameError - TODO add `import tqdm`.
            loader = tqdm.tqdm(loader, desc="validate")

        with torch.no_grad():
            for data, target in loader:
                samples = target.shape[0]
                total_samples += samples
                # NOTE(review): torch.tensor(...) on tensors makes a copy
                # (and warns on modern torch); data.to(self.device) would do.
                data, target = (
                    torch.tensor(data).to(self.device),
                    torch.tensor(target).to(self.device),
                )
                output = self(data)
                # get the index of the max log-probability
                val = soft_dice_coef(output, target)
                val_score += val.sum().cpu().numpy()

        origin = col_name
        suffix = "validate"
        if kwargs["apply"] == "local":
            suffix += "_local"
        else:
            suffix += "_agg"
        tags = ("metric", suffix)
        output_tensor_dict = {
            TensorKey("dice_coef", origin, round_num, True, tags): np.array(
                val_score / total_samples
            )
        }
        return output_tensor_dict, {}


def optimizer(x):
    # Optimizer factory passed to FederatedModel (x = model parameters).
    return optim.Adam(x, lr=1e-3)
# -

# Create `KvasirFederatedDataset`, federated datasets for collaborators will
# be created in `split()` method of this object

fl_data = KvasirFederatedDataset(batch_size=6)

# The `FederatedModel` object is a wrapper around your Keras, Tensorflow or
# PyTorch model that makes it compatible with OpenFL. It provides built-in
# federated training function which will be used while training. Using its
# `setup` function, collaborator models and datasets can be automatically
# obtained for the experiment.

# Create a federated model using the pytorch class, optimizer function, and loss function
fl_model = FederatedModel(build_model=UNet, optimizer=optimizer,
                          loss_fn=soft_dice_loss, data_loader=fl_data)

collaborator_models = fl_model.setup(num_collaborators=2)
collaborators = {'one': collaborator_models[0], 'two': collaborator_models[1]}

# We can see the current FL plan values by running the `fx.get_plan()` function

# +
# Get the current values of the FL plan. Each of these can be overridden
print(fx.get_plan())
# -

# Now we are ready to run our experiment. If we want to pass in custom FL plan
# settings, we can easily do that with the `override_config` parameter

# Run experiment, return trained FederatedModel
final_fl_model = fx.run_experiment(
    collaborators, override_config={'aggregator.settings.rounds_to_train': 30})

# Save final model
final_fl_model.save_native('final_pytorch_model')

# Let's visually evaluate the results

collaborator = collaborator_models[0]
loader = collaborator.runner.data_loader.get_valid_loader()
model = final_fl_model.model
model.eval()
device = final_fl_model.runner.device
model.to(device)
# Plot (image, prediction, ground truth) for the first 5 validation batches.
with torch.no_grad():
    for batch, _ in zip(loader, range(5)):
        preds = model(batch[0].to(device))
        for image, pred, target in zip(batch[0], preds, batch[1]):
            plt.figure(figsize=(10, 10))
            plt.subplot(131)
            # Undo the Normalize(0.5, 0.5) transform for display.
            plt.imshow(image.permute(1, 2, 0).data.cpu().numpy() * 0.5 + 0.5)
            plt.title("img")
            plt.subplot(132)
            plt.imshow(pred[0].data.cpu().numpy())
            plt.title("pred")
            plt.subplot(133)
            plt.imshow(target[0].data.cpu().numpy())
            plt.title("targ")
            plt.show()
openfl-tutorials/Federated_PyTorch_UNET_Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
import re, string, unicodedata
import nltk
import contractions
import inflect
from bs4 import BeautifulSoup
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
# -

# +
# Deliberately messy sample text exercising every cleaning step below.
sample = """<h1>Title Goes Here</h1>
<b>Bolded Text</b>
<i>Italicized Text</i>
<img src="this should all be gone"/>
<a href="this will be gone, too">But this will still be here!</a>
I run. He ran. She is running. Will they stop running?
I talked. She was talking. They talked to them about running. Who ran to the talking runner?
[Some text we don't want to keep is in here]
¡Sebastián, Nicolás, Alejandro and Jéronimo are going to the store tomorrow morning!
something... is! wrong() with.,; this :: sentence.
I can't do this anymore. I didn't know them. Why couldn't you have dinner at the restaurant?
My favorite movie franchises, in order: Indiana Jones; Marvel Cinematic Universe; Star Wars; Back to the Future; Harry Potter.
Don't do it.... Just don't. Billy! I know what you're doing. This is a great little house you've got here.
[This is some other unwanted text]
John: "Well, well, well."
James: "There, there. There, there."
&nbsp;&nbsp;
There are a lot of reasons not to do this. There are 101 reasons not to do it. 1000000 reasons, actually.
I have to go get 2 tutus from 2 different stores, too.
22 45 1067 445
{{Here is some stuff inside of double curly braces.}}
{Here is more stuff in single curly braces.}
[DELETE]
</body>
</html>"""
# -


# +
def strip_html(text):
    """Drop all HTML markup, keeping only the visible text."""
    soup = BeautifulSoup(text, "html.parser")
    return soup.get_text()
# -


# +
# Raw string fixes the invalid escape sequence '\[' of the original pattern;
# compiled once instead of on every call.
_BRACKETED = re.compile(r'\[[^]]*\]')

def remove_between_square_brackets(text):
    """Delete bracketed annotations such as '[DELETE]' (brackets included)."""
    return _BRACKETED.sub('', text)
# -


# +
def denoise_text(text):
    """Strip HTML, then remove bracketed annotations."""
    text = strip_html(text)
    text = remove_between_square_brackets(text)
    return text
# -

# +
sample = denoise_text(sample)
print(sample)
# -

# +
def replace_contractions(text):
    """Replace contractions in string of text ("can't" -> "cannot")."""
    return contractions.fix(text)

sample = replace_contractions(sample)
print(sample)
# -

# +
import nltk
nltk.download('punkt')
# -

# +
words = nltk.word_tokenize(sample)
print(words)
# -


# +
def remove_non_ascii(words):
    """Remove non-ASCII characters from list of tokenized words."""
    return [
        unicodedata.normalize('NFKD', word)
        .encode('ascii', 'ignore').decode('utf-8', 'ignore')
        for word in words
    ]

def to_lowercase(words):
    """Convert all characters to lowercase from list of tokenized words."""
    return [word.lower() for word in words]

def remove_punctuation(words):
    """Remove punctuation from tokens; drop tokens that become empty."""
    stripped = (re.sub(r'[^\w\s]', '', word) for word in words)
    return [word for word in stripped if word != '']

def replace_numbers(words):
    """Replace all integer occurrences with their textual representation."""
    p = inflect.engine()
    return [p.number_to_words(word) if word.isdigit() else word
            for word in words]

def remove_stopwords(words):
    """Remove English stop words from list of tokenized words.

    The stop-word list is materialized once as a set; the original
    rebuilt stopwords.words('english') for every single token.
    """
    stop_set = set(stopwords.words('english'))
    return [word for word in words if word not in stop_set]

def stem_words(words):
    """Stem words in list of tokenized words (aggressive Lancaster stemmer)."""
    stemmer = LancasterStemmer()
    return [stemmer.stem(word) for word in words]

def lemmatize_verbs(words):
    """Lemmatize tokens as verbs in list of tokenized words."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word, pos='v') for word in words]

def normalize(words):
    """Run the full normalization pipeline (order matters: punctuation
    must go before number spelling, stop words are removed last)."""
    for step in (remove_non_ascii, to_lowercase, remove_punctuation,
                 replace_numbers, remove_stopwords):
        words = step(words)
    return words
# -

# +
import nltk
nltk.download('stopwords')
# -

# +
import nltk
nltk.download('wordnet')
# -

# +
words = normalize(words)
print(words)
# -

# +
def stem_and_lemmatize(words):
    """Return (stems, lemmas) computed from the same token list."""
    stems = stem_words(words)
    lemmas = lemmatize_verbs(words)
    return stems, lemmas

stems, lemmas = stem_and_lemmatize(words)
print('Stemmed:\n', stems)
print('\nLemmatized:\n', lemmas)
# -
Text_Data_Preprocessing_A_Walkthrough_in_Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/StevenJokes/d2l-en-read/blob/master/dcgan_switch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="3hoh0_oyyu3t" colab_type="text" # The following additional libraries are needed to run this # notebook. Note that running on Colab is experimental, please report a Github # issue if you have any problem. # + id="ORqgekkwyu3v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="734ad6f0-dcd9-4d92-fb51-17ef30f7e71d" # !pip install d2l==0.14.3 # !pip install -U mxnet-cu101mkl==1.6.0.post0 # updating mxnet to at least v1.6 # + [markdown] origin_pos=0 id="So7mRxS_yu32" colab_type="text" # # Deep Convolutional Generative Adversarial Networks # :label:`sec_dcgan` # # In :numref:`sec_basic_gan`, we introduced the basic ideas behind how GANs work. We showed that they can draw samples from some simple, easy-to-sample distribution, like a uniform or normal distribution, and transform them into samples that appear to match the distribution of some dataset. And while our example of matching a 2D Gaussian distribution got the point across, it is not especially exciting. # # In this section, we will demonstrate how you can use GANs to generate photorealistic images. We will be basing our models on the deep convolutional GANs (DCGAN) introduced in :cite:`Radford.Metz.Chintala.2015`. We will borrow the convolutional architecture that have proven so successful for discriminative computer vision problems and show how via GANs, they can be leveraged to generate photorealistic images. 
# # + attributes={"classes": [], "id": "", "n": "1"} origin_pos=1 tab=["mxnet"] id="87ZQfcCCyu33" colab_type="code" colab={} from mxnet import gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() # + [markdown] origin_pos=2 id="7XgEeeBqyu36" colab_type="text" # ## The Pokemon Dataset # # The dataset we will use is a collection of Pokemon sprites obtained from [pokemondb](https://pokemondb.net/sprites). First download, extract and load this dataset. # # + attributes={"classes": [], "id": "", "n": "2"} origin_pos=3 tab=["mxnet"] id="WQJGQPrByu37" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="43988542-185c-4509-f3e0-3e1f7ff0c0d6" #@save d2l.DATA_HUB['pokemon'] = (d2l.DATA_URL + 'pokemon.zip', 'c065c0e2593b8b161a2d7873e42418bf6a21106c') data_dir = d2l.download_extract('pokemon') pokemon = gluon.data.vision.datasets.ImageFolderDataset(data_dir) # + [markdown] origin_pos=4 id="c5c6Hz3ryu3_" colab_type="text" # We resize each image into $64\times 64$. The `ToTensor` transformation will project the pixel value into $[0, 1]$, while our generator will use the tanh function to obtain outputs in $[-1, 1]$. Therefore we normalize the data with $0.5$ mean and $0.5$ standard deviation to match the value range. # # + attributes={"classes": [], "id": "", "n": "3"} origin_pos=5 tab=["mxnet"] id="7BWvgxQRyu3_" colab_type="code" colab={} batch_size = 256 transformer = gluon.data.vision.transforms.Compose([ gluon.data.vision.transforms.Resize(64), gluon.data.vision.transforms.ToTensor(), gluon.data.vision.transforms.Normalize(0.5, 0.5) ]) data_iter = gluon.data.DataLoader( pokemon.transform_first(transformer), batch_size=batch_size, shuffle=True, num_workers=d2l.get_dataloader_workers()) # + [markdown] origin_pos=6 id="7JAtUYeSyu4E" colab_type="text" # Let us visualize the first 20 images. 
# # + attributes={"classes": [], "id": "", "n": "4"} origin_pos=7 tab=["mxnet"] id="sfjewJBAyu4F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 480} outputId="6a520d3a-96a8-434d-9ced-ef0d58964930" d2l.set_figsize((4, 4)) for X, y in data_iter: imgs = X[0:20,:,:,:].transpose(0, 2, 3, 1)/2+0.5 d2l.show_images(imgs, num_rows=4, num_cols=5) break # + [markdown] origin_pos=8 id="vWZlJxxbyu4J" colab_type="text" # ## The Generator # # The generator needs to map the noise variable $\mathbf z\in\mathbb R^d$, a length-$d$ vector, to a RGB image with width and height to be $64\times 64$ . In :numref:`sec_fcn` we introduced the fully convolutional network that uses transposed convolution layer (refer to :numref:`sec_transposed_conv`) to enlarge input size. The basic block of the generator contains a transposed convolution layer followed by the batch normalization and ReLU activation. # # + attributes={"classes": [], "id": "", "n": "5"} origin_pos=9 tab=["mxnet"] id="zhP8XPzgyu4J" colab_type="code" colab={} class G_block(nn.Block): def __init__(self, channels, kernel_size=4, strides=2, padding=1, **kwargs): super(G_block, self).__init__(**kwargs) self.conv2d_trans = nn.Conv2DTranspose( channels, kernel_size, strides, padding, use_bias=False) self.batch_norm = nn.BatchNorm() self.activation = nn.Activation('relu') def forward(self, X): return self.activation(self.batch_norm(self.conv2d_trans(X))) # + [markdown] origin_pos=10 id="rEY4Ri05yu4M" colab_type="text" # In default, the transposed convolution layer uses a $k_h = k_w = 4$ kernel, a $s_h = s_w = 2$ strides, and a $p_h = p_w = 1$ padding. With a input shape of $n_h^{'} \times n_w^{'} = 16 \times 16$, the generator block will double input's width and height. 
# # $$ # \begin{aligned} # n_h^{'} \times n_w^{'} &= [(n_h k_h - (n_h-1)(k_h-s_h)- 2p_h] \times [(n_w k_w - (n_w-1)(k_w-s_w)- 2p_w]\\ # &= [(k_h + s_h (n_h-1)- 2p_h] \times [(k_w + s_w (n_w-1)- 2p_w]\\ # &= [(4 + 2 \times (16-1)- 2 \times 1] \times [(4 + 2 \times (16-1)- 2 \times 1]\\ # &= 32 \times 32 .\\ # \end{aligned} # $$ # # + attributes={"classes": [], "id": "", "n": "6"} origin_pos=11 tab=["mxnet"] id="u-DVuuz8yu4N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="139b1905-a600-4d67-a3f2-1d03295cef27" x = np.zeros((2, 3, 16, 16)) g_blk = G_block(20) g_blk.initialize() g_blk(x).shape # + [markdown] origin_pos=12 id="IpTuBGY1yu4Q" colab_type="text" # If changing the transposed convolution layer to a $4\times 4$ kernel, $1\times 1$ strides and zero padding. With a input size of $1 \times 1$, the output will have its width and height increased by 3 respectively. # # + attributes={"classes": [], "id": "", "n": "7"} origin_pos=13 tab=["mxnet"] id="erfBssN3yu4S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="483f0689-0944-441c-f340-e898f67b740d" x = np.zeros((2, 3, 1, 1)) g_blk = G_block(20, strides=1, padding=0) g_blk.initialize() g_blk(x).shape # + [markdown] origin_pos=14 id="XePW4gGhyu4W" colab_type="text" # The generator consists of four basic blocks that increase input's both width and height from 1 to 32. At the same time, it first projects the latent variable into $64\times 8$ channels, and then halve the channels each time. At last, a transposed convolution layer is used to generate the output. It further doubles the width and height to match the desired $64\times 64$ shape, and reduces the channel size to $3$. The tanh activation function is applied to project output values into the $(-1, 1)$ range. 
# # + attributes={"classes": [], "id": "", "n": "8"} origin_pos=15 tab=["mxnet"] id="_mVTyGnjyu4W" colab_type="code" colab={} n_G = 64 net_G = nn.Sequential() net_G.add(G_block(n_G*8, strides=1, padding=0), # Output: (64 * 8, 4, 4) G_block(n_G*4), # Output: (64 * 4, 8, 8) G_block(n_G*2), # Output: (64 * 2, 16, 16) G_block(n_G), # Output: (64, 32, 32) nn.Conv2DTranspose( 3, kernel_size=4, strides=2, padding=1, use_bias=False, activation='tanh')) # Output: (3, 64, 64) # + [markdown] origin_pos=16 id="xJ9VyVoRyu4Z" colab_type="text" # Generate a 100 dimensional latent variable to verify the generator's output shape. # # + attributes={"classes": [], "id": "", "n": "9"} origin_pos=17 tab=["mxnet"] id="yAGeQ4Hdyu4Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="af6842cb-4f32-4a48-dfc1-20fcf8c4a17e" x = np.zeros((1, 100, 1, 1)) net_G.initialize() net_G(x).shape # + [markdown] origin_pos=18 id="6t0uqiN5yu4d" colab_type="text" # ## Discriminator # # The discriminator is a normal convolutional network network except that it uses a leaky ReLU as its activation function. Given $\alpha \in[0, 1]$, its definition is # # $$\textrm{leaky ReLU}(x) = \begin{cases}x & \text{if}\ x > 0\\ \alpha x &\text{otherwise}\end{cases}.$$ # # As it can be seen, it is normal ReLU if $\alpha=0$, and an identity function if $\alpha=1$. For $\alpha \in (0, 1)$, leaky ReLU is a nonlinear function that give a non-zero output for a negative input. It aims to fix the "dying ReLU" problem that a neuron might always output a negative value and therefore cannot make any progress since the gradient of ReLU is 0. 
# # + attributes={"classes": [], "id": "", "n": "10"} origin_pos=19 tab=["mxnet"] id="jYo7x59ryu4d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 267} outputId="185f6051-83c7-43df-bb97-3abf66da50f5" alphas = [0, 0.2, 0.4, .6, .8, 1] x = np.arange(-2, 1, 0.1) Y = [nn.LeakyReLU(alpha)(x).asnumpy() for alpha in alphas] d2l.plot(x.asnumpy(), Y, 'x', 'y', alphas) # + [markdown] origin_pos=20 id="Y9ZYFU9nyu4h" colab_type="text" # The basic block of the discriminator is a convolution layer followed by a batch normalization layer and a leaky ReLU activation. The hyperparameters of the convolution layer are similar to the transpose convolution layer in the generator block. # # + attributes={"classes": [], "id": "", "n": "11"} origin_pos=21 tab=["mxnet"] id="Ycjl9oqLyu4h" colab_type="code" colab={} class D_block(nn.Block): def __init__(self, channels, kernel_size=4, strides=2, padding=1, alpha=0.2, **kwargs): super(D_block, self).__init__(**kwargs) self.conv2d = nn.Conv2D( channels, kernel_size, strides, padding, use_bias=False) self.batch_norm = nn.BatchNorm() self.activation = nn.LeakyReLU(alpha) def forward(self, X): return self.activation(self.batch_norm(self.conv2d(X))) # + [markdown] origin_pos=22 id="vrixcuCOyu4l" colab_type="text" # A basic block with default settings will halve the width and height of the inputs, as we demonstrated in :numref:`sec_padding`. 
For example, given an input shape $n_h = n_w = 16$, with a kernel shape $k_h = k_w = 4$, a stride shape $s_h = s_w = 2$, and a padding shape $p_h = p_w = 1$, the output shape will be:
In addition, we change $\beta_1$ in Adam (:numref:`sec_adam`) from $0.9$ to $0.5$. It decreases the smoothness of the momentum, the exponentially weighted moving average of past gradients, to take care of the rapid changing gradients because the generator and the discriminator fight with each other. Besides, the random generated noise `Z`, is a 4-D tensor and we are using GPU to accelerate the computation. # # + attributes={"classes": [], "id": "", "n": "20"} origin_pos=29 tab=["mxnet"] id="PBE3t5V2yu4v" colab_type="code" colab={} def train(net_D, net_G, data_iter, num_epochs, lr, latent_dim, device=d2l.try_gpu()): loss = gluon.loss.SigmoidBCELoss() net_D.initialize(init=init.Normal(0.02), force_reinit=True, ctx=device) net_G.initialize(init=init.Normal(0.02), force_reinit=True, ctx=device) trainer_hp = {'learning_rate': lr, 'beta1': 0.5} trainer_D = gluon.Trainer(net_D.collect_params(), 'adam', trainer_hp) trainer_G = gluon.Trainer(net_G.collect_params(), 'adam', trainer_hp) animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[1, num_epochs], nrows=2, figsize=(5, 5), legend=['generator', 'discriminator']) animator.fig.subplots_adjust(hspace=0.3) for epoch in range(1, num_epochs + 1): # Train one epoch timer = d2l.Timer() metric = d2l.Accumulator(3) # loss_D, loss_G, num_examples for X, _ in data_iter: batch_size = X.shape[0] Z = np.random.normal(0, 1, size=(batch_size, latent_dim, 1, 1)) X, Z = X.as_in_ctx(device), Z.as_in_ctx(device), metric.add(d2l.update_D(X, Z, net_D, net_G, loss, trainer_D), d2l.update_G(Z, net_D, net_G, loss, trainer_G), batch_size) # Show generated examples Z = np.random.normal(0, 1, size=(21, latent_dim, 1, 1), ctx=device) # Normalize the synthetic data to N(0, 1) fake_x = net_G(Z).transpose(0, 2, 3, 1) / 2 + 0.5 imgs = np.concatenate( [np.concatenate([fake_x[i * 7 + j] for j in range(7)], axis=1) for i in range(len(fake_x)//7)], axis=0) animator.axes[1].cla() animator.axes[1].imshow(imgs.asnumpy()) # Show the losses loss_D, loss_G 
= metric[0] / metric[2], metric[1] / metric[2] animator.add(epoch, (loss_D, loss_G)) print(f'loss_D {loss_D:.3f}, loss_G {loss_G:.3f}, ' f'{metric[2] / timer.stop():.1f} examples/sec on {str(device)}') # + [markdown] origin_pos=30 id="ShRDwmOWyu4y" colab_type="text" # We train the model with a small number of epochs just for demonstration. # For better performance, # the variable `num_epochs` can be set to a larger number. # # + attributes={"classes": [], "id": "", "n": "21"} origin_pos=31 tab=["mxnet"] id="rf_RJW4gyu4y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 442} outputId="e956a844-3d4f-489e-cd91-32b7ba49a9e3" latent_dim, lr, num_epochs = 100, 0.005, 20 train(net_D, net_G, data_iter, num_epochs, lr, latent_dim) # + [markdown] origin_pos=32 id="v9AyuIt6yu40" colab_type="text" # ## Summary # # * DCGAN architecture has four convolutional layers for the Discriminator and four "fractionally-strided" convolutional layers for the Generator. # * The Discriminator is a 4-layer strided convolutions with batch normalization (except its input layer) and leaky ReLU activations. # * Leaky ReLU is a nonlinear function that give a non-zero output for a negative input. It aims to fix the “dying ReLU” problem and helps the gradients flow easier through the architecture. # # # ## Exercises # # 1. What will happen if we use standard ReLU activation rather than leaky ReLU? # 1. Apply DCGAN on Fashion-MNIST and see which category works well and which does not. # # + [markdown] origin_pos=33 tab=["mxnet"] id="S6f__KzCyu41" colab_type="text" # [Discussions](https://discuss.d2l.ai/t/409) #
chapter-generative-adversarial-networks/dcgan_switch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [MIT License](https://opensource.org/licenses/MIT) # # Copyright 2018 <NAME> (<EMAIL>) # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import plotly.plotly as py import pandas as pd import plotly.graph_objs as go from geopy.geocoders import Nominatim from geopy import distance # distance.distance(tup, tup) takes a pair of (lat, lon) and # calculates the distance between them in km. from collections import namedtuple as namedtuple from time import sleep from IPython.core.display import display, HTML geo = namedtuple('geo', ['name', 'lat', 'lon']) marker_style = namedtuple('marker_style', ['size', 'color', 'opacity']) class EasyMap (): # This is the DCS 103 public token. It can be changed if needed. # That is, if it gets out in the wild, then we'll want to swap it/remove it. 
token = ("<PASSWORD>") meta = {} # Our default style is a reasonable sized dot in Bates Garnet. marker_style = marker_style(8, '#881124', 1) def __init__(self, map_name, username): self.meta["map_name"] = map_name self.meta["username"] = username self.set_defaults() # Sets the MapBox API token. def set_token(self, tok): self.token = tok def set_geocode_delay(secs): self.meta["geocode_delay"] = secs def set_defaults(self): # Must be a MapBox style URL. self.meta["style"] = "mapbox://styles/mjadud/cjmyx30qr32g02rpcsrunes4v" self.meta["locator"] = Nominatim(user_agent="PPPP " + self.meta["username"]) # Create an empty dictionary of geopoints. self.meta["geos"] = dict() # Center this on Lewistion, Maine # We'll do this the hard way, because I don't want it plotted on the map. self.meta["center"] = geo("Lewiston, ME", 44.100351, -70.2147764) # Make sure we're ready to accommodate marker styles. self.meta["category_styles"] = dict() self.meta["category_styles"]["default"] = self.marker_style self.meta["geocode_delay"] = 0.5 # CONTRACT # set_center : string -> geo # PURPOSE # Lets us recenter the map. Does not add it to the list of points # that we are plotting. Returns the geo of the point. def set_center(self, addr): g = None pt = self.meta["locator"].geocode(addr) if not pt: print("Could not look up the address [{0}]".format(addr)) return None else: g = geo(addr, pt.latitude, pt.longitude) self.meta["center"] = g return g # CONTRACT # set_style : url -> _ # PURPOSE # Consumes a MapBox style URL, and sets it for configuring # the output map via Plotly. def set_style(self, style_url): self.meta.style = style_url # CONTRACT # set_category_style : mstyle -> _ # PURPOSE # Takes an mstyle, and stores it for marker plotting. def set_category_style(self, cat, size, color, opacity): self.meta["category_styles"][cat] = marker_style(size, color, opacity) # CONTRACT # add_point : string number number -> geo # PURPOSE # Adds a point to the map. 
Returns a geo structure of the lat/lon given. def add_point(self, name, lat, lon, cat = "default"): if cat in self.meta["geos"]: self.meta["geos"][cat][name] = geo(name, lat, lon) else: self.meta["geos"][cat] = dict() self.meta["geos"][cat][name] = geo(name, lat, lon) return self.meta["geos"][cat][name] # CONTRACT # add_address : string -> geo # PURPOSE # Adds a point to the map based on an address. # Returns the geo associated with that point. def add_address(self, addr, cat = "default"): # geolocator.geocode("2 Andrews Road Lewiston Maine") pt = self.meta["locator"].geocode(addr) sleep(self.meta["geocode_delay"]) if not pt: print("Could not look up the address [{0}]".format(addr)) return None else: return self.add_point(addr, pt.latitude, pt.longitude, cat = cat) def setup_map(self): self.meta["data"] = [] data = [] # To build up the data for the plot, we need each category # of points to be a different style. This requires walking each # category separately. # The key is the category, the d is a dict of geos. 
for cat, d in self.meta["geos"].items(): lats = list() lons = list() texts = list() for k, v in d.items(): texts.append(k) lats.append(v.lat) lons.append(v.lon) data.append( go.Scattermapbox(lat = lats, lon = lons, mode = 'markers', marker = dict(size = self.meta["category_styles"][cat].size, color = self.meta["category_styles"][cat].color, opacity = self.meta["category_styles"][cat].opacity ), text = texts) ) self.meta["data"] = data self.meta["layout"] = go.Layout(autosize = True, hovermode = 'closest', mapbox = dict(accesstoken = self.token, bearing = 0, center = dict(lat = self.meta["center"].lat, lon = self.meta["center"].lon), pitch = 0, zoom = 12, style = self.meta["style"])) def iplot (self): self.setup_map() fig = dict(data = self.meta["data"], layout = self.meta["layout"]) mapname = "PPPP " + self.meta["username"] + " - " + self.meta["username"] obj = py.iplot(fig, filename = mapname) self.meta["url"] = obj.resource return obj def get_url(self): if "url" in self.meta: return display(HTML('<a href="{0}" target="_blank">{0}</a>'.format(self.meta["url"]))) else: return None
courses/2018-f-dcs103/PPPP100.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import datetime import pandas as pd import torch import matplotlib.pyplot as plt path = os.path.expanduser("~/github/owid/covid-19-data/public/data/vaccinations/vaccinations.csv") df = pd.read_csv(path, header=0) df portion = df["total_vaccinations_per_hundred"].to_numpy() / 100 portion locations = df["location"].to_list() location_id = sorted(set(locations)) location_id = {name: i for i, name in enumerate(location_id)} locations = torch.tensor([location_id[n] for n in locations], dtype=torch.long) # + def parse_date(s): return datetime.datetime.strptime(s, "%Y-%m-%d") start_date = parse_date("2020-12-01") dates = torch.tensor([(parse_date(d) - start_date).days for d in df["date"]]) assert dates.min() >= 0 # - T = int(1 + dates.max()) R = len(location_id) dense_portion = torch plt.scatter(dates, portion, s=10, alpha=0.5);
explore-owid-vaccinations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os from tqdm.notebook import tqdm from pathlib import Path import pandas as pd from nltk.lm import Vocabulary import sys sys.path.append("../../lib") from metrics import levenshtein import pickle folder = "../../data/ICDAR2019_POCR_competition_dataset/ICDAR2019_POCR_competition_training_18M_without_Finnish/SL/" output_folder = Path("../../data/sl") files = sorted(os.listdir(folder)) len(files) # + import glob files = glob.glob(folder + '/**/*.txt', recursive=True) len(files) # + from multiprocessing import Pool def extract(name): with open(name) as file: return file.readlines() def create_windows(x): A, B, window_length = x assert len(A) == len(B) return [(A[i:i + window_length], B[i:i + window_length]) for i in range(len(A) + 1)] p = Pool(4) data = list(p.imap_unordered(extract, tqdm(files), chunksize = 128)) len(data) # + # data = [] # for f in tqdm(files): # with open(f) as file: # data.append(file.readlines()) data = pd.DataFrame(data, columns = ["ocr_to_input", "ocr_aligned", "gs_aligned"])\ .assign(ocr_to_input = lambda df: df.ocr_to_input.str.replace("[OCR_toInput] ", "", regex = False), ocr_aligned = lambda df: df.ocr_aligned.str.replace("[OCR_aligned] ", "", regex = False), gs_aligned = lambda df: df.gs_aligned.str.replace("[ GS_aligned] ", "", regex = False)) print(data.shape) data.head() # - data.applymap(len).describe() levenshtein(reference = data.gs_aligned.str.replace("@", ""), hypothesis = data.ocr_to_input).cer.describe() levenshtein(reference = data.gs_aligned, hypothesis = data.ocr_aligned).cer.describe() vocabulary = Vocabulary(data.ocr_to_input.sum() + data.ocr_aligned.sum() + data.gs_aligned.sum()) print(len(vocabulary)) with open(output_folder/"data/vocabulary.pkl", "wb") as file: pickle.dump(vocabulary, file) dev = 
data.sample(n = 5, random_state = 1) dev.to_pickle(output_folder/"data/dev.pkl") dev.shape train = data.drop(dev.index) train.to_pickle(output_folder/"data/train.pkl") train.shape train.applymap(len).describe() dev.applymap(len).describe() levenshtein(reference = dev.gs_aligned.str.replace("@", ""), hypothesis = dev.ocr_to_input).cer.describe() levenshtein(reference = dev.gs_aligned, hypothesis = dev.ocr_to_input).cer.describe() window_length = 100 df = train#.head(100) train_aligned = list(p.imap_unordered(create_windows, tqdm(zip(df.ocr_aligned, df.gs_aligned, [window_length for x in df.ocr_aligned]), total = len(df.ocr_aligned)), chunksize = 128)) s = [] for r in tqdm(train_aligned): s.extend(r) train_aligned = pd.DataFrame(s, columns = ["source", "target"]) print(train_aligned.shape) train_aligned.head() train_aligned = train_aligned.assign(source = lambda df: df.source.str.replace("@", "")) train_aligned.head() dev_aligned = dev.apply(lambda r: create_windows((r["ocr_aligned"], r["gs_aligned"], window_length)), axis = 1).sum() dev_aligned = pd.DataFrame(dev_aligned, columns = ["source", "target"]) print(dev_aligned.shape) dev_aligned.head() dev_aligned = dev_aligned.assign(source = lambda df: df.source.str.replace("@", "")) dev_aligned.head() train_aligned.to_pickle(output_folder/"data/train_aligned.pkl") dev_aligned.to_pickle(output_folder/"data/dev_aligned.pkl")
notebooks/sl/0_exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Compensation for a non-linear charge sensor # We analyse the effect of a non-linear sensing dot on the value for the tunnel coupling obtained from the fitting of an inter-dot transition line. The sensing dot shape is simulated based on a Gaussian, while the data of the inter-dot transition is experimental data. # # First we load all necessary packages # + import os import qcodes import matplotlib.pyplot as plt import time import numpy as np import scipy from qcodes.data.hdf5_format import HDF5Format import qtt import qtt.pgeometry from qtt.data import load_example_dataset from qtt.algorithms.functions import gaussian from qtt.algorithms.tunneling import polmod_all_2slopes, fit_pol_all from qtt.algorithms.chargesensor import DataLinearizer, correctChargeSensor # %matplotlib inline np.set_printoptions(suppress=True, precision=3) # - def show_pol_fit(delta, signal, par_fit, fig=1): """ Show data of a polarization fit """ plt.figure(fig) plt.clf() plt.plot(delta, signal, 'bo') plt.plot(delta, polmod_all_2slopes(delta, par_fit, kT), 'r') plt.title('Tunnel coupling: %.2f (ueV) = %.2f (GHz)' % (par_fit[0], par_fit[0] / h)) plt.xlabel('Difference in chemical potentials (ueV)') _ = plt.ylabel('Signal (a.u.)') # Define physical constants and parameters h = 1e9*scipy.constants.h/(1e-6*scipy.constants.elementary_charge) # Planck's constant in units [ueV/GHz] kb = scipy.constants.k/(1e-6*scipy.constants.elementary_charge) # [ueV/K], Boltzmann constant kT = 10e-3 * kb # effective electron temperature in ueV # ### Load example dataset and define signal of charge sensor # + dataset = load_example_dataset('2017-02-21/15-59-56') signal = np.array(dataset.default_parameter_array('signal')) delta = np.array(dataset.default_parameter_array('signal').set_arrays[0]) 
# Define signal of the sensing dot xs = np.arange(-600, 0, 3.) ys = gaussian(xs, -300, std=70, amplitude=510, offset=2) ys = gaussian(xs, -300, std=70, amplitude=510, offset=20) # - # ### Find range of sensing dot used # The correction to the non-linearity of the charge sensor is done by fitting a linear function in the region of interest. dl, results = correctChargeSensor(delta, signal, xs, ys, fig=100) plt.plot(xs, ys, '.b', label='raw data of charge sensor') plt.legend() plt.title('Data of charge sensor', fontsize=16) _ = plt.xlabel('Plunger [mV]') _ = plt.ylabel('Signal (a.u.)') # Determine the corrected data points xsignal = dl.backward_curve(signal) signal_corrected = dl.forward(xsignal) # make sure data is in similar range # ### Fit the polarization line # The effect of the non-linear charge sensor is a 5% error in the estimated tunnel coupling. # + par_fit,_ , _ = fit_pol_all(delta, signal, kT, par_guess=None) show_pol_fit(delta, signal, par_fit, fig=1) par_fit_corrected, _, _ = fit_pol_all(delta, signal_corrected, kT) show_pol_fit(delta, signal_corrected, par_fit_corrected, fig=2) print('tunnel coupling: %.1f [GHz]' % (par_fit[0] / h)) print('tunnel coupling with compensation: %.1f [GHz]' % (par_fit_corrected[0] / h)) # - print('### fitted parameters ###') print(par_fit) print(par_fit_corrected)
docs/notebooks/analysis/example_charge_sensor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import unittest


class TestStringMethods(unittest.TestCase):
    """Demonstrate unittest basics by exercising a few str methods."""

    def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO')

    def test_isupper(self):
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_split(self):
        phrase = 'hello world'
        self.assertEqual(phrase.split(), ['hello', 'world'])
        # Splitting on a non-string separator must raise TypeError.
        with self.assertRaises(TypeError):
            phrase.split(2)

#if __name__ == '__main__':
#    unittest.main(argv=['first-arg-is-ignored'], exit=False)
# -

import random
import unittest


class TestSequenceFunctions(unittest.TestCase):
    """Demonstrate setUp plus the random module's sequence helpers."""

    def setUp(self):
        # A fresh 0..9 list before every test method.
        self.seq = list(range(10))

    def test_shuffle(self):
        # Shuffling must only permute elements, never drop or add any.
        random.shuffle(self.seq)
        self.seq.sort()
        self.assertEqual(self.seq, list(range(10)))
        # Shuffling an immutable sequence must raise TypeError.
        self.assertRaises(TypeError, random.shuffle, (1, 2, 3))

    def test_choice(self):
        picked = random.choice(self.seq)
        self.assertTrue(picked in self.seq)

    def test_sample(self):
        # Asking for more elements than exist must fail.
        with self.assertRaises(ValueError):
            random.sample(self.seq, 20)
        for picked in random.sample(self.seq, 5):
            self.assertTrue(picked in self.seq)


# Run the suites in-process; argv/exit tweaks keep the notebook kernel alive.
unittest.main(argv=['first-arg-is-ignored'], exit=False)

# Same run again, but with per-test (verbose) output.
unittest.main(argv=['first-arg-is-ignored'], exit=False, verbosity=2)
unittest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Deferred Initialization # + from mxnet import init, nd from mxnet.gluon import nn def getnet(): net = nn.Sequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) return net net = getnet() net.collect_params() # + [markdown] slideshow={"slide_type": "slide"} # ### Still Hasn't Initialized yet # - net.initialize() net.collect_params() # + [markdown] slideshow={"slide_type": "slide"} # ### When Really Initialized # - x = nd.random.uniform(shape=(2, 20)) net(x) net.collect_params() # + [markdown] slideshow={"slide_type": "slide"} # ## Deferred Initialization in Practice # + attributes={"classes": [], "id": "", "n": "22"} class MyInit(init.Initializer): def _init_weight(self, name, data): print('Init', name, data.shape) # The actual initialization logic is omitted here. net = getnet() net.initialize(init=MyInit()) # + [markdown] slideshow={"slide_type": "slide"} # ### Calling Initialization # + attributes={"classes": [], "id": "", "n": "25"} x = nd.random.uniform(shape=(2, 20)) y = net(x) print('2nd forward') y = net(x) # + [markdown] slideshow={"slide_type": "slide"} # # ## Forced Initialization # # # - print('already known the shape') net.initialize(init=MyInit(), force_reinit=True) print('specified the input shape') net = nn.Sequential() net.add(nn.Dense(256, in_units=20, activation='relu')) net.add(nn.Dense(10, in_units=256)) net.initialize(init=MyInit())
slides/2_21/deferred-init.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import packages import time import os import numpy as np import scipy.optimize as opt from dask.distributed import Client from dask import compute, delayed import dask.multiprocessing import pickle import matplotlib import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator, FormatStrFormatter from mpl_toolkits.mplot3d import Axes3D from ogusa import get_micro_data from ogusa.utils import DEFAULT_START_YEAR TAX_ESTIMATE_PATH = os.environ.get("TAX_ESTIMATE_PATH", ".") # -
taxfunc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## MLP MNIST with data augmentation # + ''' MLP network for MNIST digits classification w/ data augment Test accuracy: 97.7 ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function # numpy package import numpy as np from keras.models import Sequential from keras.layers import Dense, Activation from keras.preprocessing.image import ImageDataGenerator from keras.datasets import mnist from keras.utils import to_categorical # load mnist dataset (x_train, y_train), (x_test, y_test) = mnist.load_data() # compute the number of labels num_labels = np.amax(y_train) + 1 # convert to one-hot vector y_train = to_categorical(y_train) y_test = to_categorical(y_test) # image dimensions (assumed square) image_size = x_train.shape[1] input_size = image_size * image_size # we train our network using float data x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # network parameters batch_size = 128 hidden_units = 256 data_augmentation = True epochs = 20 max_batches = len(x_train) / batch_size # this is 3-layer MLP with ReLU after each layer model = Sequential() model.add(Dense(hidden_units, input_dim=input_size)) model.add(Activation('relu')) model.add(Dense(hidden_units)) model.add(Activation('relu')) model.add(Dense(num_labels)) # this is the output for one-hot vector model.add(Activation('softmax')) model.summary() # loss function for one-hot vector # use of sgd optimizer # accuracy is good metric for classification tasks model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # validate the model on test dataset to determine generalization # score = model.evaluate(x_test, y_test, batch_size=batch_size) # print("\nTest accuracy: %.1f%%" % (100.0 * 
score[1])) # Run training, with or without data augmentation. if not data_augmentation: print('Not using data augmentation.') # train the network no data augmentation x_train = np.reshape(x_train, [-1, input_size]) model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size) else: print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: # we need [width, height, channel] dim for data aug x_train = np.reshape(x_train, [-1, image_size, image_size, 1]) datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=5.0, # randomly rotate images in the range (deg 0 to 180) width_shift_range=0.0, # randomly shift images horizontally height_shift_range=0.0, # randomly shift images vertically horizontal_flip=False, # randomly flip images vertical_flip=False) # randomly flip images # Compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) for e in range(epochs): batches = 0 for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=batch_size): x_batch = np.reshape(x_batch, [-1, input_size]) model.fit(x_batch, y_batch, verbose=0) batches += 1 print("Epoch %d/%d, Batch %d/%d" % (e+1, epochs, batches, max_batches)) if batches >= max_batches: # we need to break the loop by hand because # the generator loops indefinitely break # Score trained model. x_test = np.reshape(x_test, [-1, input_size]) scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # -
keras/regularization/mlp-mnist-data_augment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 07 Align the spine # # We need to make sure that all the fruits are standing upright before further modeling # - Some spine tissue is very irregular by the sides (flaps between flesh wedges) # - In such case, it is better to erode the spine to remove this extraneous flaps and keep just a bare column # - The column is aligned via PCA # - Since the sign of the eigenvectors is arbitrary when computed, final visual inspection is done to ensure that the fruit is standing on its base. # # The alignment is stored as a rotation 3x3 matrix `vh` for each fruit. # + import numpy as np import pandas as pd import glob import os import warnings warnings.filterwarnings( "ignore") import matplotlib.pyplot as plt # %matplotlib inline import tifffile as tf from scipy import ndimage import citrus_utils as vitaminC # - tissue_src = '../data/tissue/' bnames = [os.path.split(x)[-1] for x in sorted(glob.glob(tissue_src + 'WR*'))] for i in range(len(bnames)): print(i, '\t', bnames[i]) # + footpoints = 'geocentric' oil_src = '../data/oil/' oil_dst = '../data/glands/' bname = bnames[0] L = 3 lname = 'L{:02d}'.format(L) src = oil_src + bname + '/' + lname + '/' savefig = True dst = '../data/spine/' if not os.path.isdir(dst): os.makedirs(dst) spinename = tissue_src + bname + '/' + lname + '/' + bname + '_' + lname + '_spine.tif' exoname = tissue_src + bname + '/' + lname + '/' + bname + '_' + lname + '_exocarp.tif' print(spinename) # - exo = tf.imread(exoname) spine = tf.imread(spinename) scoords = np.asarray(np.nonzero(spine)) snaps = vitaminC.collapse_dimensions(spine) vitaminC.plot_collapse_dimensions(snaps, bname, 'spine') # Plot the original exocarp to get a sense if the fruit is standing upright as it is. 
# - This one, `WR05` is almost upright snaps = vitaminC.collapse_dimensions(exo) vitaminC.plot_collapse_dimensions(snaps, bname, 'exocarp') sz = 3 espine = ndimage.grey_erosion(spine, size=(sz,sz,sz)) tspine = vitaminC.get_largest_element(espine) # - Erorded spine # - The `x,y,z` coordinates have been aligned via PCA # - The plot confirms that the spine is standing upright vh = vitaminC.spine_based_alignment(tspine, 'eroded spine', savefig=False, dst=dst) # If the spine were to be standing upside down, we can flip the rotation by doing # ``` # vh[0] = -vh[0] # ``` # Save the rotation matrix `vh` in the same folder as the spine scan filename = tissue_src + bname + '/' + lname + '/' + bname + '_' + lname + '_vh_alignment.csv' np.savetxt(filename, vh, delimiter=',') # ### Verify that `vh` is the right rotation # # - Rotate the oil gland tissues and check if the fruit looks standing upright filename = src + bname + '_glands.tif' img = tf.imread(filename) centers = np.asarray(np.nonzero(img)) # + glands = np.matmul(centers.T, np.transpose(vh)) centerby = np.mean(glands, axis = 0) scaleby = .5*np.std(glands[:,0]) glands = (glands - centerby)/scaleby title = bname + '_' + lname + ' aligned glands' vitaminC.plot_3Dprojections(glands, title=title, writefig=False, dst=dst)
jupyter/07_align_spine.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SAURABHMASLEKAR/xyz/blob/main/neural_networks_gas_turbines_csv.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="9PtDiaWZnBWG"
# Predict turbine energy yield (TEY) from ambient/process sensor readings
# with a small Keras regression network.
import pandas as pd
import numpy as np

#Plot Tools
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns

#Model Building
from sklearn.preprocessing import StandardScaler
import sklearn
import keras
from keras.wrappers.scikit_learn import KerasRegressor
from keras.models import Sequential
from keras.layers import InputLayer,Dense
import tensorflow as tf

#Model Validation
from sklearn.model_selection import cross_val_score, KFold, train_test_split
from sklearn.metrics import mean_squared_error

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="aebz5Vvrnpfd" outputId="90ca67b0-0032-45e5-f4e4-753376892565"
data=pd.read_csv('gas_turbines.csv')
data

# + colab={"base_uri": "https://localhost:8080/"} id="Q2wAg06ooA-_" outputId="6a8e4474-2329-40cf-f3a8-16ab1121fa24"
data.info()

# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="8pV6jq0AoGhh" outputId="1e16678e-aa7b-403b-d0c5-f521b4be9a1e"
data.describe()

# + id="MtwXAzNHcc_s"
X = data.loc[:,['AT', 'AP', 'AH', 'AFDP', 'GTEP', 'TIT', 'TAT', 'CDP', 'CO','NOX']]
y= data.loc[:,['TEY']]

# + id="owsmWcPIc40k"
# FIX: the original reused one StandardScaler instance, fitting it on X and then
# re-fitting it on y. Every later inverse_transform happened to use the y
# statistics only because y was fitted last; one dedicated scaler per quantity
# makes the intent explicit and robust to re-running cells out of order.
x_scaler = StandardScaler()
y_scaler = StandardScaler()
X = x_scaler.fit_transform(X)
y = y_scaler.fit_transform(y)

# + id="w64NmNzvdAst"
def baseline_model():
    """Build the regression net: 10 inputs -> 10 tanh units -> 1 linear output (MSE/adam)."""
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='tanh'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model

# + colab={"base_uri": "https://localhost:8080/"} id="6D8-G9OQdDAA" outputId="a2401956-b3a8-482a-b57e-074358876911"
# FIX: Keras 2 renamed `nb_epoch` to `epochs`; with `nb_epoch` the wrapper
# silently trained for its default number of epochs instead of 50.
estimator = KerasRegressor(build_fn=baseline_model, epochs=50, batch_size=100, verbose=False)
kfold = KFold(n_splits=10)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))

# + id="fx9JLLkhdlDi"
# Refit on the full data and predict in-sample (scaled units).
estimator.fit(X, y)
prediction = estimator.predict(X)

# + colab={"base_uri": "https://localhost:8080/"} id="9osdT0codohS" outputId="d5d3a9fe-4cf0-422c-c064-e3e126cd27a8"
prediction

# + colab={"base_uri": "https://localhost:8080/"} id="qsOObIeadsc3" outputId="02964c4d-4f2e-4477-c5e2-33d252804dd3"
# Back to physical TEY units for an interpretable error.
a=y_scaler.inverse_transform(prediction)
a

# + colab={"base_uri": "https://localhost:8080/"} id="RJ9L93s1dypW" outputId="62d23b3b-5941-4236-8cfe-b3f524c4e922"
b=y_scaler.inverse_transform(y)
b

# + colab={"base_uri": "https://localhost:8080/"} id="j6r3nwLzd54F" outputId="1778118a-bd21-4b2f-9cf0-1bf038da7321"
mean_squared_error(b,a)

# + id="yvDODE3jd97y"
# Proper held-out evaluation: 70/30 split.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3)

# + id="-TApYMmSeBYu"
estimator.fit(X_train, y_train)
prediction = estimator.predict(X_test)

# + colab={"base_uri": "https://localhost:8080/"} id="kq14HsXoeExf" outputId="0e26f5bc-990c-44e8-e8db-068cdeba2686"
prediction

# + id="lk4aAoyMeJ45"
c=y_scaler.inverse_transform(prediction)

# + id="mcwC4OgXeL_g"
d=y_scaler.inverse_transform(y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="7WNWiPEdePMj" outputId="10d40780-3c68-45da-fc97-af983a2fef8f"
mean_squared_error(d,c)
neural_networks_gas_turbines_csv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from collections import Counter import math, random # + #아이들을 만들어서 조건부 확률을 계산해 보자 def random_kid(): return random.choice(["boy", "girl"]) # - kid_test_list = [random_kid() for i in range(10)] kid_test_list #random_kid 함수는 boy와 girl 두개의 값중에 하는 램덤하게 추출함 # + both_girls = 0 older_girl = 0 either_girl = 0 random.seed(0) for _ in range(10000): younger = random_kid() older = random_kid() if older == "girl": # 큰 아이가 여자일 경우 +1 older_girl += 1 if older == "girl" and younger == "girl": #둘다 여자일 경우 +1 both_girls += 1 if older == "girl" or younger == "girl": #둘중에 하나라도 여자일경우 +1 either_girl += 1 print ("P(both | older):", both_girls / older_girl) # 0.514 ~ 1/2 #큰 아이가 딸이고 둘다 딸일 확률 print ("P(both | either): ", both_girls / either_girl) # 0.342 ~ 1/3 # 둘중에 한명이 딸이면서 둘 따 딸일 확률 # - both_girls / older_girl def uniform_pdf(x): return 1 if x >= 0 and x < 1 else 0 def uniform_cdf(x): "returns the probability that a uniform random variable is less than x" if x < 0: return 0 # uniform random is never less than 0 elif x < 1: return x # e.g. 
P(X < 0.4) = 0.4 else: return 1 # uniform random is always less than 1 # + import numpy as np x = np.arange(-1.0, 2.0, 0.1) result_array = np.vectorize(uniform_cdf, otypes=[np.float])(x) # + import matplotlib.pyplot as plt # %pylab inline plt.plot(x, result_array) plt.axis([-1, 2, -1, 1.5]) plt.show() # + def normal_pdf(x, mu=0, sigma=1): sqrt_two_pi = math.sqrt(2 * math.pi) return (math.exp(-(x-mu) ** 2 / 2 / sigma ** 2) / (sqrt_two_pi * sigma)) for sigma_value in [1,2,0.5,1]: x = np.arange(-6.0, 6.0, 0.1) result_array = np.vectorize(normal_pdf, otypes=[np.float])(x, sigma=sigma_value) # plt.plot(x, result_array, "ro") plt.plot(x, result_array) plt.axis([-6, 6, 0, 1]) plt.show() # + def plot_normal_pdfs(plt): xs = [x / 10.0 for x in range(-50, 50)] plt.plot(xs,[normal_pdf(x,sigma=1) for x in xs],'-',label='mu=0,sigma=1') plt.plot(xs,[normal_pdf(x,sigma=0.5) for x in xs],':',label='mu=0,sigma=0.5') plt.plot(xs,[normal_pdf(x,mu=-1) for x in xs],'-.',label='mu=-1,sigma=1') plt.legend() plt.show() import matplotlib.pyplot as plt plot_normal_pdfs(plt) # - def normal_cdf(x, mu=0,sigma=1): return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2 # + def plot_normal_cdfs(plt): xs = [x / 10.0 for x in range(-50, 50)] plt.plot(xs,[normal_cdf(x,sigma=1) for x in xs],'-',label='mu=0,sigma=1') plt.plot(xs,[normal_cdf(x,sigma=2) for x in xs],'--',label='mu=0,sigma=2') plt.plot(xs,[normal_cdf(x,sigma=0.5) for x in xs],':',label='mu=0,sigma=0.5') plt.plot(xs,[normal_cdf(x,mu=-1) for x in xs],'-.',label='mu=-1,sigma=1') plt.legend(loc=4) # bottom right plt.show() import matplotlib.pyplot as plt plot_normal_cdfs(plt) # - def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001): """find approximate inverse using binary search""" # if not standard, compute standard and rescale if mu != 0 or sigma != 1: return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance) low_z, low_p = -10.0, 0 # normal_cdf(-10) is (very close to) 0 hi_z, hi_p = 10.0, 1 # normal_cdf(10) is (very close 
to) 1 while hi_z - low_z > tolerance: mid_z = (low_z + hi_z) / 2 # consider the midpoint mid_p = normal_cdf(mid_z) # and the cdf's value there if mid_p < p: # midpoint is still too low, search above it low_z, low_p = mid_z, mid_p elif mid_p > p: # midpoint is still too high, search below it hi_z, hi_p = mid_z, mid_p else: break return mid_z # + np.vectorize(inverse_normal_cdf, otypes=[np.float])([0, 0.5, 0.90, 0.95, 0.975, 1]) # 0%, 50%, 90%, 95%, 97.5%, 100%의 확률일경우 누적분포의 확률변수값 # - def bernoulli_trial(p): return 1 if random.random() < p else 0 def binomial(p, n): return sum(bernoulli_trial(p) for _ in range(n)) # + def make_hist(p, n, num_points): data = [binomial(p, n) for _ in range(num_points)] # use a bar chart to show the actual binomial samples histogram = Counter(data) plt.bar([x - 0.4 for x in histogram.keys()], [v / num_points for v in histogram.values()], 0.8, color='0.75') mu = p * n sigma = math.sqrt(n * p * (1 - p)) # use a line chart to show the normal approximation xs = range(min(data), max(data) + 1) ys = [normal_cdf(i + 0.5, mu, sigma) - normal_cdf(i - 0.5, mu, sigma) for i in xs] plt.plot(xs,ys) plt.show() make_hist(0.75,100,1000) # - make_hist(0.50,100,1000)
notebook/ch06_probability.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/priyanshgupta1998/Machine_learning/blob/master/AnalyticsVidhya/ImageProcessing/object_detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="g-_WprmFA8OV" colab_type="code" colab={}


# + id="tKmz1lMlCcQM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="ecc91c3b-c4e7-4152-fc71-dac8a7a28fc1"
# !pip install https://github.com/OlafenwaMoses/ImageAI/releases/download/2.0.1/imageai-2.0.1-py3-none-any.whl

# + id="oL0YwbQcG8H5" colab_type="code" colab={}
# Detect only people (car=False) in image.png with a pretrained RetinaNet and
# write the annotated result to image_new.png.
from imageai.Detection import ObjectDetection
import os

execution_path = os.getcwd()

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
# NOTE(review): expects resnet50_coco_best_v2.0.1.h5 to be present in the
# working directory — download is not handled by this notebook.
detector.setModelPath( os.path.join(execution_path , "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel()

custom_objects = detector.CustomObjects(person=True, car=False)
detections = detector.detectCustomObjectsFromImage(input_image=os.path.join(execution_path , "image.png"), output_image_path=os.path.join(execution_path , "image_new.png"), custom_objects=custom_objects, minimum_percentage_probability=65)

for eachObject in detections:
    print(eachObject["name"] + " : " + eachObject["percentage_probability"] )
    print("--------------------------------")

# + id="VwMXUFdMG8GD" colab_type="code" colab={}
# Display the annotated output image inline.
from IPython.display import Image
Image("image_new.png")

# + id="uYYsQi55G8Ei" colab_type="code" colab={}


# + id="EbymKJQrG8DM" colab_type="code" colab={}


# + id="8t-tcG33G8A8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9804e9fc-68d7-40dd-8d70-81d65ca0543e"
# %pylab inline
import os
import numpy as np
import pandas as pd
# from scipy.misc import imread
# NOTE(review): the imread import above is commented out but `imread` is used
# below. %pylab injects matplotlib's imread, which does NOT accept the
# flatten= keyword used later — confirm the intended imread source
# (scipy.misc.imread was removed in SciPy 1.2).
from sklearn.metrics import accuracy_score
import tensorflow as tf
import keras

# + id="ZqKishn_G7-H" colab_type="code" colab={}
# To stop potential randomness
seed = 128
rng = np.random.RandomState(seed)

# + id="DhWjfj-3G771" colab_type="code" colab={}
# NOTE(review): `data_dir` is only defined in a later cell ("it can be used in
# future"); running this notebook top-to-bottom raises NameError here.
train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
test = pd.read_csv(os.path.join(data_dir, 'Test.csv'))

sample_submission = pd.read_csv(os.path.join(data_dir, 'Sample_Submission.csv'))

train.head()

# + id="fG5d138oCcK6" colab_type="code" colab={}
#display an image
img_name = rng.choice(train.filename)
filepath = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)

img = imread(filepath, flatten=True)

pylab.imshow(img, cmap='gray')
pylab.axis('off')
pylab.show()

# + [markdown] id="J5ymEc3fV_St" colab_type="text"
# # For easier data manipulation, let’s store all our images as numpy arrays

# + id="e8FpD49pA8T0" colab_type="code" colab={}
# Load every train/test image as a flattened 28x28 grayscale vector in [0, 1].
temp = []
for img_name in train.filename:
    image_path = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
    img = imread(image_path, flatten=True)
    img = img.astype('float32')
    temp.append(img)

train_x = np.stack(temp)

train_x /= 255.0
train_x = train_x.reshape(-1, 784).astype('float32')

temp = []
for img_name in test.filename:
    image_path = os.path.join(data_dir, 'Train', 'Images', 'test', img_name)
    img = imread(image_path, flatten=True)
    img = img.astype('float32')
    temp.append(img)

test_x = np.stack(temp)

test_x /= 255.0
test_x = test_x.reshape(-1, 784).astype('float32')

# One-hot encode the labels for the softmax output.
train_y = keras.utils.np_utils.to_categorical(train.label.values)

# + id="nHwmuvD0A8gP" colab_type="code" colab={}
# 70/30 train/validation split (by position, not shuffled).
split_size = int(train_x.shape[0]*0.7)

train_x, val_x = train_x[:split_size], train_x[split_size:]
train_y, val_y = train_y[:split_size], train_y[split_size:]

# + id="17BgAMDDA8oL" colab_type="code" colab={}
# NOTE(review): DataFrame.ix was deprecated and removed in modern pandas;
# .iloc/.loc is the replacement.
train.label.ix[split_size:]

# + id="916j7qhxA8tV" colab_type="code" colab={}
# define vars
input_num_units = 784
hidden_num_units = 50
output_num_units = 10

epochs = 5
batch_size = 128

# import keras modules
from keras.models import Sequential
from keras.layers import Dense

# create model
# NOTE(review): output_dim= is Keras 1 syntax (renamed to units= in Keras 2);
# works only with old Keras versions.
model = Sequential([
  Dense(output_dim=hidden_num_units, input_dim=input_num_units, activation='relu'),
  Dense(output_dim=output_num_units, input_dim=hidden_num_units, activation='softmax'),
])

# compile the model with necessary attributes
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# + id="E_eLqgjuA8yZ" colab_type="code" colab={}
# NOTE(review): nb_epoch= is the Keras 1 name; Keras 2 expects epochs=.
trained_model = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))

# + id="0-2XjlVPA8-I" colab_type="code" colab={}
# Predict on the test images and eyeball one random prediction.
pred = model.predict_classes(test_x)

img_name = rng.choice(test.filename)
filepath = os.path.join(data_dir, 'Train', 'Images', 'test', img_name)

img = imread(filepath, flatten=True)

# Test filenames continue the train numbering, so offset by train size.
test_index = int(img_name.split('.')[0]) - train.shape[0]

print("Prediction is: ", pred[test_index])

pylab.imshow(img, cmap='gray')
pylab.axis('off')
pylab.show()

# + id="XJeL_FWfA9Dr" colab_type="code" colab={}
# NOTE(review): `sub_dir` is also only defined in a later cell.
sample_submission.filename = test.filename; sample_submission.label = pred

sample_submission.to_csv(os.path.join(sub_dir, 'sub02.csv'), index=False)

# + id="w3fzfVywA87i" colab_type="code" colab={}


# + [markdown] id="O2Hi5TlVYLL_" colab_type="text"
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#

# + id="v_7uNwEZA85k" colab_type="code" colab={}
# Second, self-contained run of the same experiment (imports repeated).
# %pylab inline
import os
import numpy as np
import pandas as pd
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2; this import fails
# on modern SciPy (use imageio.imread instead).
from scipy.misc import imread
from sklearn.metrics import accuracy_score
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Convolution2D, Flatten, MaxPooling2D, Reshape, InputLayer

# + id="sn7GaylRA83f" colab_type="code" colab={}
#set seed value
# To stop potential randomness
seed = 128
rng = np.random.RandomState(seed)

# + id="2bpL3UOxA81_" colab_type="code" colab={}
# it can be used in future
# Directory layout: <root>/data holds the csv/images, <root>/sub the submissions.
root_dir = os.path.abspath('../..')
data_dir = os.path.join(root_dir, 'data')
sub_dir = os.path.join(root_dir, 'sub')

# check for existence
# NOTE(review): the boolean results are discarded — nothing fails if a
# directory is missing.
os.path.exists(root_dir)
os.path.exists(data_dir)
os.path.exists(sub_dir)

# + id="991xHeXgA8wb" colab_type="code" colab={}
# Reload csvs and images exactly as in part 1 (flattened 28x28 vectors in [0,1]).
train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
test = pd.read_csv(os.path.join(data_dir, 'Test.csv'))

sample_submission = pd.read_csv(os.path.join(data_dir, 'Sample_Submission.csv'))

temp = []
for img_name in train.filename:
    image_path = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
    img = imread(image_path, flatten=True)
    img = img.astype('float32')
    temp.append(img)

train_x = np.stack(temp)

train_x /= 255.0
train_x = train_x.reshape(-1, 784).astype('float32')

temp = []
for img_name in test.filename:
    image_path = os.path.join(data_dir, 'Train', 'Images', 'test', img_name)
    img = imread(image_path, flatten=True)
    img = img.astype('float32')
    temp.append(img)

test_x = np.stack(temp)

test_x /= 255.0
test_x = test_x.reshape(-1, 784).astype('float32')

train_y = keras.utils.np_utils.to_categorical(train.label.values)

# + id="8orN8HTDA8ls" colab_type="code" colab={}
# Wider single hidden layer: 500 units instead of 50.
# NOTE(review): output_dim=/input_dim= is Keras 1 syntax (units= in Keras 2).
# define vars
input_num_units = 784
hidden_num_units = 500
output_num_units = 10

epochs = 5
batch_size = 128

model = Sequential([
  Dense(output_dim=hidden_num_units, input_dim=input_num_units, activation='relu'),
  Dense(output_dim=output_num_units, input_dim=hidden_num_units, activation='softmax'),
])

# + id="3RE6YABTA8jQ" colab_type="code" colab={}
# NOTE(review): val_x/val_y come from the part-1 split cell and are not
# recreated here, so this section is not actually self-contained.
# NOTE(review): nb_epoch= is the Keras 1 name; Keras 2 expects epochs=.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_500 = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))

# + id="C2rt4TXDA8d8" colab_type="code" colab={}
# Deep variant: five hidden layers of 50 units each.
# define vars
input_num_units = 784
hidden1_num_units = 50
hidden2_num_units = 50
hidden3_num_units = 50
hidden4_num_units = 50
hidden5_num_units = 50
output_num_units = 10

epochs = 5
batch_size = 128

model = Sequential([
 Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'),
 Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'),
 Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'),
 Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'),
 Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu'),

 Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])

# + id="toKolLXoA8ba" colab_type="code" colab={}
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))

# + id="dksVacC3A8ZF" colab_type="code" colab={}
#NOW USING DROPOUT PROPERTY TO ESCAPE FROM OVERFITTING PROBLEM
# Same five-layer net with 20% dropout after every hidden layer.
# define vars
input_num_units = 784
hidden1_num_units = 50
hidden2_num_units = 50
hidden3_num_units = 50
hidden4_num_units = 50
hidden5_num_units = 50
output_num_units = 10

epochs = 5
batch_size = 128

dropout_ratio = 0.2

model = Sequential([
 Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'),
 Dropout(dropout_ratio),
 Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'),
 Dropout(dropout_ratio),
 Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'),
 Dropout(dropout_ratio),
 Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'),
 Dropout(dropout_ratio),
 Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu'),
 Dropout(dropout_ratio),

 Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])

# + id="UY_Fjfk1A8XL" colab_type="code" colab={}
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d_with_drop = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))

# + id="P8_xK9JNA8R5" colab_type="code" colab={}


# + [markdown] id="hF62MQGKZpZP" colab_type="text"
# #Now apply CNN

# + id="TCfH9gzLZDfC" colab_type="code" colab={}
# reshape data
# Reshape the flat 784-vectors back to 28x28x1 images for the conv layers.
train_x_temp = train_x.reshape(-1, 28, 28, 1)
val_x_temp = val_x.reshape(-1, 28, 28, 1)

# define vars
input_shape = (784,)
input_reshape = (28, 28, 1)

conv_num_filters = 5
conv_filter_size = 5

pool_size = (2, 2)

hidden_num_units = 50
output_num_units = 10

epochs = 5
batch_size = 128

# NOTE(review): Convolution2D(25, 5, 5, ...) is Keras 1 syntax
# (Conv2D(25, (5, 5), ...) in Keras 2).
model = Sequential([
 InputLayer(input_shape=input_reshape),

 Convolution2D(25, 5, 5, activation='relu'),
 MaxPooling2D(pool_size=pool_size),

 Convolution2D(25, 5, 5, activation='relu'),
 MaxPooling2D(pool_size=pool_size),

 Convolution2D(25, 4, 4, activation='relu'),

 Flatten(),

 Dense(output_dim=hidden_num_units, activation='relu'),

 Dense(output_dim=output_num_units, input_dim=hidden_num_units, activation='softmax'),
])

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

trained_model_conv = model.fit(train_x_temp, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x_temp, val_y))

# + id="49_zdAhrZDdF" colab_type="code" colab={}


# + id="a6zqVEzBZDbS" colab_type="code" colab={}


# + id="YsB6n5D_ZDZm" colab_type="code" colab={}


# + id="fv10C57kZDW5" colab_type="code" colab={}


# + id="WzrCcV7lZDVB" colab_type="code" colab={}


# + id="1kKtQ9RXZDS4" colab_type="code" colab={}


# + id="s84tFYBUZDPz" colab_type="code" colab={}


# + id="0m2XLun6ZDNZ" colab_type="code" colab={}


# + id="hyCRfu9EZDIl" colab_type="code" colab={}


# + id="TGsv3pBJA8MO" colab_type="code" colab={}
AnalyticsVidhya/ImageProcessing/object_detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Natas representation of grids # # Nata has two type of representation for grids, `GridArray` and `GridDataset`. Both types are **array-like containers** which have a common interface for # # - Annotations (`.name`, `.label`, `.unit`) # - Plugins (`.plot`, `.fft`, ...) # # and both can be created **from a file** (using `.from_path`) or **from an array-like object** (using `.from_array`). from nata.containers import GridArray from nata.containers import GridDataset # ## How to create GridArray and GridDataset # Create an `GridArray` from a numpy array with annotations. # + import numpy as np example_array = np.arange(32, dtype=np.float64).reshape((4, 8)) grid = GridArray.from_array(example_array) grid # - # By default nata will provide some default parameters when creating a container object. This can be changed at creation # + from nata.containers import Axis grid = GridArray.from_array( example_array, name="some_name", label="some label", unit="some unit", time=10.0, axes=( Axis.from_array([0, 1, 2, 3], name="a1"), Axis.from_array(np.arange(8) - 10, name="a2"), ) ) grid # - # or by changing each property individually grid.name = "some_new_name" grid.label = "some new label" grid.unit = "some new unit" grid # The `GridArray` represent a grid at **one** specific time step print(f"{grid.time.shape = }") print(f"{grid.axes[0].shape = }") print(f"{grid.axes[1].shape = }") # but the `GridDataset` represents a grid at **different time steps** grid_ds = GridDataset.from_array(example_array) grid_ds print(f"{grid_ds.shape = }") print(f"{grid_ds.time.shape = }") print(f"{grid_ds.axes[0].shape = }") print(f"{grid_ds.axes[1].shape = }") # ## Reading from path # # Grids can be created by using the `.from_path` method. 
from nata import examples # Here are some example grid files: # !ls -l {examples.grids} # To create a `GridArray` from **one file** simply use the `.from_path` method one_grid = GridArray.from_path(examples.grids / "grid-1d-0.h5") one_grid # or create a `GridDataset` from **several files** multiple_grids = GridDataset.from_path(examples.grids / "*") multiple_grids # ## Indexing # Nata's container support object indexing, e.g. indexing along the temporal axis multiple_grids[2] # or indexing along the spatial axis multiple_grids[::2, 10:-25:3] # ## Getting the data stored type(multiple_grids.to_numpy()) multiple_grids[-1, 10:25].to_numpy() # Data in nata is stored in dask array. This allow multiple_grids.to_dask() # ## Grid containers behave like numpy arrays example = GridArray.from_array([0, 1, 4, 9], name="example", label="\chi") example sqrt_example = np.sqrt(example) sqrt_example.to_numpy() # While numpy functions are directly supported by using numpy's dispatch mechanism, annotations are not conserved sqrt_example # To cover this special cases, nata introduces plugins. They are handy helper for repeatable tasks # + from nata.containers import register_plugin @register_plugin(name="sqrt") def sqrt_for_grids(data: GridArray): result = np.sqrt(data) result.name = "sqrt_" + data.name result.label = f"\sqrt({data.label})" return result example.sqrt() # + nata_grid = GridArray.from_array([0, 1, 0, 2]) import matplotlib.pyplot as plt plt.plot(nata_grid)
examples/Grids.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/juunnn/DTSAI2019/blob/master/D19_JunaediFahmi.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="gMAbE53BADmI" colab_type="text"
# # Artificial Neural Network
#
# ---
#
#

# + [markdown] id="ScCoV5XvLGJi" colab_type="text"
# # Single Layer Neural Network (Perceptron)

# + [markdown] id="cCKJmnM2AWwL" colab_type="text"
# ## Define the Perceptron class

# + id="y6x01RfkAHx8" colab_type="code" colab={}
import numpy as np

class Perceptron(object):
    """Classic single-layer perceptron with a hard step activation.

    weights[0] holds the bias, weights[1:] the input weights.
    `threshold` is the number of training epochs and `learning_rate`
    the step size of the perceptron update rule.
    """

    def __init__(self, no_input, threshold=100, learning_rate=0.01):
        self.threshold = threshold
        self.learning_rate = learning_rate
        # one weight per input plus the bias slot at index 0
        self.weights = np.zeros(no_input + 1)

    def predict(self, inputs):
        """Return 1 when the biased weighted sum is positive, else 0."""
        activation = np.dot(self.weights[1:], inputs) + self.weights[0]
        if activation > 0:
            return 1
        return 0

    def train(self, training_inputs, labels):
        """Run the perceptron learning rule for `threshold` epochs."""
        for _ in range(self.threshold):
            for sample, target in zip(training_inputs, labels):
                # nudge every weight by lr * (target - output)
                error = target - self.predict(sample)
                self.weights[1:] += self.learning_rate * error * sample
                self.weights[0] += self.learning_rate * error

# + [markdown] id="hTQ1mLMJCbpq" colab_type="text"
# ## Training AND GATE

# + id="5VVzfEFPCXDu" colab_type="code" outputId="eb2c3349-b8e2-4945-a152-aba925b3554f" colab={"base_uri": "https://localhost:8080/", "height": 52}
# All four input combinations of a two-input gate, with AND targets.
training_inputs = [np.array(pair) for pair in ((1, 1), (1, 0), (0, 1), (0, 0))]
labels = np.array([1, 0, 0, 0])

and_perceptron = Perceptron(2, threshold=200, learning_rate=0.1)
and_perceptron.train(training_inputs, labels)

inputs = np.array([1,1])
print(and_perceptron.predict(inputs))
inputs = np.array([0,1])
print(and_perceptron.predict(inputs))

# + [markdown] colab_type="text" id="jqfMlk2AFhmM"
#
# ## Training OR GATE

# + colab_type="code" outputId="bc70b439-fbfc-4b1c-d030-f029bd6f8a84" id="u4n-aRucFhmY" colab={"base_uri": "https://localhost:8080/", "height": 52}
training_inputs = []
training_inputs.append(np.array([1,1]))
training_inputs.append(np.array([1,0]))
training_inputs.append(np.array([0,1]))
training_inputs.append(np.array([0,0]))

labels = np.array([1,1,1,0])

or_perceptron = Perceptron(2, threshold=200, learning_rate=0.1)
or_perceptron.train(training_inputs, labels)

inputs = np.array([1,1])
print(or_perceptron.predict(inputs))
inputs = np.array([0,0])
print(or_perceptron.predict(inputs))

# + id="zlacdxFAe6b7" colab_type="code" outputId="a5ebf401-d132-4ab2-ab37-b45decd34d01" colab={"base_uri": "https://localhost:8080/", "height": 52}
# NOT gate: single input, inverted labels.
training_inputs = []
training_inputs.append(np.array([1]))
training_inputs.append(np.array([1]))
training_inputs.append(np.array([0]))
training_inputs.append(np.array([0]))

labels = np.array([0,0,1,1])

not_perceptron = Perceptron(1, threshold=200, learning_rate=0.1)
not_perceptron.train(training_inputs, labels)

inputs = np.array([1])
print(not_perceptron.predict(inputs))
inputs = np.array([0])
print(not_perceptron.predict(inputs))

# + [markdown] colab_type="text" id="ct2oDTQ4Fubo"
# ## Training XOR GATE

# + colab_type="code" outputId="1463477c-6b80-4f87-beb2-f1bb5e5237d3" id="gwFK1nbzFubw" colab={"base_uri": "https://localhost:8080/", "height": 86}
# XOR is not linearly separable, so it is composed from the trained gates:
# xor(a, b) = AND(NOT(AND(a, b)), OR(a, b)).
def xor(inputs):
    gate1 = not_perceptron.predict(and_perceptron.predict(inputs))
    gate2 = or_perceptron.predict(inputs)
    return and_perceptron.predict([gate1, gate2])

training_inputs = []
training_inputs.append(np.array([1,1]))
training_inputs.append(np.array([1,0]))
training_inputs.append(np.array([0,1]))
training_inputs.append(np.array([0,0]))

for train in training_inputs:
    print(xor(train))

# + [markdown] id="WgQfWyzsK90c" colab_type="text"
# # Multi Layer Perceptron

# + id="SAFl1XzuLEHE" colab_type="code" colab={}
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import matplotlib

# + id="PYI0rXoFOh-F" colab_type="code" colab={}
# %matplotlib inline
matplotlib.rcParams['figure.figsize'] = (10,8)

# + id="sJgzx7iOOujh" colab_type="code" outputId="4335118f-1bd5-4e0a-d110-3608622f206e" colab={"base_uri": "https://localhost:8080/", "height": 503}
# Toy 2-class dataset: two interleaved half-moons.
np.random.seed(3)
X, y = sklearn.datasets.make_moons(200, noise=.2)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)

# + id="eu45Ju5EPCnq" colab_type="code" colab={}
def plot_decs_bound(pred_func):
    # Plot pred_func's decision regions over the global X, y scatter.
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole gid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

# + id="TOlIVWjcRCKE" colab_type="code" colab={}
# Network/optimization hyperparameters (read as globals by the functions below).
num_examples = len(X)
nn_input_dim = 2
nn_output_dim = 2

epsilon = 0.01      # gradient descent learning rate
reg_lambda = 0.01   # L2 regularization strength

# + id="4PrVNFKGRWew" colab_type="code" colab={}
def calculate_loss(model):
    # Mean cross-entropy over the whole dataset, plus L2 penalty on W1/W2.
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    corect_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(corect_logprobs)
    data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2) ))
    return 1./num_examples * data_loss

# + id="JvT2kHp9S5OO" colab_type="code" colab={}
def predict(model, x):
    # Forward pass (tanh hidden layer, softmax output); return the argmax class.
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    z1 = x.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return np.argmax(probs, axis = 1)

# + id="ZJsZyJOMTHTj" colab_type="code" colab={}
def build_model(nn_hdim, num_passes=20000, print_loss=False):
    # Train a 2-layer net with nn_hdim hidden units by full-batch gradient
    # descent on the global X, y; returns the parameter dict.
    np.random.seed(0)
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.zeros((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.zeros((1, nn_output_dim))

    model = {}

    for i in range(0, num_passes):

        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

        # Backpropagation
        delta3 = probs
        delta3[range(num_examples), y] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)

        # Add regularization terms (b1 and b2 don't have regularization terms)
        dW2 += reg_lambda * W2
        dW1 += reg_lambda * W1

        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2

        # Assign new parameters to the model
        model = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}

        # Optionally print the loss.
        # This is expensive because it uses the whole dataset, so we don't want to do it too often.
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" %(i, calculate_loss(model)))

    return model

# + id="RfnOrfbUUv78" colab_type="code" outputId="3031d33a-5131-4a95-d548-7506acb61d92" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)

# Plot the decision boundary
plot_decs_bound(lambda X: predict(model, X))
plt.title("Decision Boundary for hidden layer size 3")

# %% 14
# Compare decision boundaries across a range of hidden-layer sizes.
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50, 15]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i+1)
    plt.title('Hidden Layer size %d' % nn_hdim)
    model = build_model(nn_hdim)
    plot_decs_bound(lambda x: predict(model, x))
plt.show()

# + [markdown] id="ELD9kGjma4aw" colab_type="text"
# ## Perceptron

# + id="WbxqoZT0a8RX" colab_type="code" colab={}
# Same problem attacked with scikit-learn's linear Perceptron for contrast.
import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt

def generate_data():
    # Fresh half-moons dataset with a fixed seed for reproducibility.
    np.random.seed(0)
    X, y = datasets.make_moons(200, noise=0.20)
    return X, y

def visualize(X, y, clf):
    # Plot the classifier's decision regions over the data.
    plot_decision_boundary(lambda x: clf.predict(x), X, y)

def plot_decision_boundary(pred_func, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole gid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.title("Logistic Regression")
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    plt.show()

def classify(X, y):
    # Fit sklearn's linear perceptron on the data.
    clf = linear_model.Perceptron()
    clf.fit(X, y)
    return clf

# + id="nz-2SMZ7bIhf" colab_type="code" outputId="b27d1497-c903-42f4-f8e4-bb1572a93ba6" colab={"base_uri": "https://localhost:8080/", "height": 499}
X, y = generate_data()
clf = classify(X, y)
visualize(X, y, clf)

# + id="GwkP9n70d_qU" colab_type="code" colab={}
D19_JunaediFahmi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# This notebook explains how we can explore and prepare data for model
# building. It is structured in the following way:
#
# - About Dataset
# - Data Summary
# - Feature Engineering
# - Missing Value Analysis
# - Outlier Analysis
# - Correlation Analysis
# - Visualizing Distribution Of Data
# - Visualizing Count Vs (Month, Season, Hour, Weekday, Usertype)
# - Filling 0's In Windspeed Using Random Forest
# - Linear Regression Model
# - Regularization Models
# - Ensemble Models

# + [markdown]
# ## About Dataset
#
# #### **Overview**
#
# Bike sharing systems are a means of renting bicycles where the process of
# obtaining membership, rental, and bike return is automated via a network of
# kiosk locations throughout a city. Using these systems, people are able to
# rent a bike from one location and return it to a different place on an
# as-needed basis. Currently, there are over 500 bike-sharing programs around
# the world.
#
# #### **Data Fields**
#
# * datetime - hourly date + timestamp
# * season - 1 = spring, 2 = summer, 3 = fall, 4 = winter
# * holiday - whether the day is considered a holiday
# * workingday - whether the day is neither a weekend nor holiday
# * weather -
#     * 1: Clear, Few clouds, Partly cloudy, Partly cloudy
#     * 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
#     * 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
#     * 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
# * temp - temperature in Celsius
# * atemp - "feels like" temperature in Celsius
# * humidity - relative humidity
# * windspeed - wind speed
# * casual - number of non-registered user rentals initiated
# * registered - number of registered user rentals initiated
# * count - number of total rentals (dependent variable)

# +
import pylab
import calendar
import numpy as np
import pandas as pd
import seaborn as sn
from scipy import stats
import missingno as msno
from datetime import datetime
import matplotlib.pyplot as plt
import warnings

pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", category=DeprecationWarning)
# %matplotlib inline

# + [markdown]
# #### **Lets Read In The Dataset**

# +
dailyData = pd.read_csv("../input/train.csv")

# + [markdown]
# ## Data Summary
#
# As a first step: size of the dataset, a glimpse of the first rows, and the
# variable data types.

# +
dailyData.shape

# +
dailyData.head(2)

# +
dailyData.dtypes

# + [markdown]
# ## Feature Engineering
#
# "season", "holiday", "workingday" and "weather" should be categorical but
# arrive as int. We therefore:
#
# - create "date", "hour", "weekday", "month" from the "datetime" column,
# - coerce "season", "holiday", "workingday" and "weather" to category,
# - drop "datetime" once the useful features are extracted.

# +
dailyData["date"] = dailyData.datetime.apply(lambda x: x.split()[0])
dailyData["hour"] = dailyData.datetime.apply(lambda x: x.split()[1].split(":")[0])
dailyData["weekday"] = dailyData.date.apply(
    lambda dateString: calendar.day_name[datetime.strptime(dateString, "%Y-%m-%d").weekday()])
dailyData["month"] = dailyData.date.apply(
    lambda dateString: calendar.month_name[datetime.strptime(dateString, "%Y-%m-%d").month])
dailyData["season"] = dailyData.season.map({1: "Spring", 2: "Summer", 3: "Fall", 4: "Winter"})
dailyData["weather"] = dailyData.weather.map({
    1: " Clear + Few clouds + Partly cloudy + Partly cloudy",
    2: " Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist ",
    3: " Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds",
    4: " Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog "})

# + [markdown]
# #### **Coercing To Category Type**

# +
categoryVariableList = ["hour", "weekday", "month", "season", "weather", "holiday", "workingday"]
for var in categoryVariableList:
    dailyData[var] = dailyData[var].astype("category")

# + [markdown]
# #### **Dropping Unnecessary Columns**

# +
dailyData = dailyData.drop(["datetime"], axis=1)

# + [markdown]
# #### **Lets Start With Very Simple Visualization Of Variables DataType Count**

# +
dataTypeDf = pd.DataFrame(dailyData.dtypes.value_counts()).reset_index().rename(
    columns={"index": "variableType", 0: "count"})
fig, ax = plt.subplots()
fig.set_size_inches(12, 5)
sn.barplot(data=dataTypeDf, x="variableType", y="count", ax=ax)
# fix: the xlabel was the garbled string 'variableTypeariable Type'
ax.set(xlabel='Variable Type', ylabel='Count', title="Variables DataType Count")

# + [markdown]
# ## Missing Values Analysis
#
# Once we get the hang of the data and columns, the next step we generally
# take is to find out whether we have any missing values. Luckily we don't
# have any missing value in this dataset. One handy way to visualize missing
# values is the "missingno" library.

# +
msno.matrix(dailyData, figsize=(12, 5))

# + [markdown]
# ## Outliers Analysis
#
# At first look, the "count" variable contains a lot of outlier points which
# skew the distribution towards the right. In addition:
#
# - Spring season has a relatively lower count (visible in the dip of the
#   boxplot median).
# - Medians are relatively higher at 7AM-8AM and 5PM-6PM, attributable to
#   regular school and office users.
# - Most outlier points come from "Working Day" rather than "Non Working Day".

# +
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sn.boxplot(data=dailyData, y="count", orient="v", ax=axes[0][0])
sn.boxplot(data=dailyData, y="count", x="season", orient="v", ax=axes[0][1])
sn.boxplot(data=dailyData, y="count", x="hour", orient="v", ax=axes[1][0])
sn.boxplot(data=dailyData, y="count", x="workingday", orient="v", ax=axes[1][1])
axes[0][0].set(ylabel='Count', title="Box Plot On Count")
axes[0][1].set(xlabel='Season', ylabel='Count', title="Box Plot On Count Across Season")
axes[1][0].set(xlabel='Hour Of The Day', ylabel='Count', title="Box Plot On Count Across Hour Of The Day")
axes[1][1].set(xlabel='Working Day', ylabel='Count', title="Box Plot On Count Across Working Day")

# + [markdown]
# #### **Lets Remove Outliers In The Count Column**

# +
# Keep rows within 3 standard deviations of the mean count.
dailyDataWithoutOutliers = dailyData[
    np.abs(dailyData["count"] - dailyData["count"].mean()) <= (3 * dailyData["count"].std())]

# +
print("Shape Of The Before Outliers: ", dailyData.shape)
print("Shape Of The After Outliers: ", dailyDataWithoutOutliers.shape)

# + [markdown]
# ## Correlation Analysis
#
# One common way to understand how the dependent variable is influenced by
# numerical features is to find a correlation matrix between them.
#
# - temp and humidity have positive and negative correlation with count
#   respectively, though neither is very prominent.
# - windspeed is not going to be a really useful numerical feature.
# - "atemp" is not taken in since "atemp" and "temp" are strongly correlated
#   (multicollinearity); one of them must be dropped during model building.
# - "casual" and "registered" are leakage variables and must be dropped too.

# +
corrMatt = dailyData[["temp", "atemp", "casual", "registered", "humidity", "windspeed", "count"]].corr()
mask = np.array(corrMatt)
mask[np.tril_indices_from(mask)] = False
fig, ax = plt.subplots()
fig.set_size_inches(20, 10)
sn.heatmap(corrMatt, mask=mask, vmax=.8, square=True, annot=True)

# +
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3)
fig.set_size_inches(12, 5)
sn.regplot(x="temp", y="count", data=dailyData, ax=ax1)
sn.regplot(x="windspeed", y="count", data=dailyData, ax=ax2)
sn.regplot(x="humidity", y="count", data=dailyData, ax=ax3)

# + [markdown]
# ## Visualizing Distribution Of Data
#
# "count" is skewed to the right; most ML techniques prefer a normal
# dependent variable. A log transformation after removing outliers helps,
# though the result still isn't ideally normal.

# +
fig, axes = plt.subplots(ncols=2, nrows=2)
fig.set_size_inches(12, 10)
sn.distplot(dailyData["count"], ax=axes[0][0])
stats.probplot(dailyData["count"], dist='norm', fit=True, plot=axes[0][1])
sn.distplot(np.log(dailyDataWithoutOutliers["count"]), ax=axes[1][0])
stats.probplot(np.log1p(dailyDataWithoutOutliers["count"]), dist='norm', fit=True, plot=axes[1][1])

# + [markdown]
# ## Visualizing Count Vs (Month, Season, Hour, Weekday, Usertype)
#
# - Demand is higher in summer months (June, July, August).
# - On weekdays more people rent around 7AM-8AM and 5PM-6PM (commuters).
# - On Saturday and Sunday the peak is instead between 10AM and 4PM.
# - The 7AM-8AM / 5PM-6PM peaks are purely contributed by registered users.

# +
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4)
fig.set_size_inches(12, 20)
sortOrder = ["January", "February", "March", "April", "May", "June", "July",
             "August", "September", "October", "November", "December"]
hueOrder = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]

monthAggregated = pd.DataFrame(dailyData.groupby("month")["count"].mean()).reset_index()
monthSorted = monthAggregated.sort_values(by="count", ascending=False)
sn.barplot(data=monthSorted, x="month", y="count", ax=ax1, order=sortOrder)
ax1.set(xlabel='Month', ylabel='Average Count', title="Average Count By Month")

hourAggregated = pd.DataFrame(dailyData.groupby(["hour", "season"], sort=True)["count"].mean()).reset_index()
sn.pointplot(x=hourAggregated["hour"], y=hourAggregated["count"], hue=hourAggregated["season"],
             data=hourAggregated, join=True, ax=ax2)
ax2.set(xlabel='Hour Of The Day', ylabel='Users Count',
        title="Average Users Count By Hour Of The Day Across Season", label='big')

hourAggregated = pd.DataFrame(dailyData.groupby(["hour", "weekday"], sort=True)["count"].mean()).reset_index()
sn.pointplot(x=hourAggregated["hour"], y=hourAggregated["count"], hue=hourAggregated["weekday"],
             hue_order=hueOrder, data=hourAggregated, join=True, ax=ax3)
ax3.set(xlabel='Hour Of The Day', ylabel='Users Count',
        title="Average Users Count By Hour Of The Day Across Weekdays", label='big')

hourTransformed = pd.melt(dailyData[["hour", "casual", "registered"]], id_vars=['hour'],
                          value_vars=['casual', 'registered'])
hourAggregated = pd.DataFrame(hourTransformed.groupby(["hour", "variable"], sort=True)["value"].mean()).reset_index()
sn.pointplot(x=hourAggregated["hour"], y=hourAggregated["value"], hue=hourAggregated["variable"],
             hue_order=["casual", "registered"], data=hourAggregated, join=True, ax=ax4)
ax4.set(xlabel='Hour Of The Day', ylabel='Users Count',
        title="Average Users Count By Hour Of The Day Across User Type", label='big')

# + [markdown]
# **So we have visualized the data to a great extent. Let's go and build some
# models and see how close we can predict the results.**

# + [markdown]
# ## Filling 0's In windspeed Using Random Forest

# + [markdown]
# **Lets Read In Train And Test Data**

# +
dataTrain = pd.read_csv("../input/train.csv")
dataTest = pd.read_csv("../input/test.csv")

# + [markdown]
# **Combine Train And Test**

# +
# fix: DataFrame.append was removed in pandas 2.0; pd.concat with
# ignore_index=True is equivalent to the original
# append + reset_index + drop('index') sequence.
data = pd.concat([dataTrain, dataTest], ignore_index=True)

# + [markdown]
# **Feature Engineering**

# +
data["date"] = data.datetime.apply(lambda x: x.split()[0])
data["hour"] = data.datetime.apply(lambda x: x.split()[1].split(":")[0]).astype("int")
data["year"] = data.datetime.apply(lambda x: x.split()[0].split("-")[0])
data["weekday"] = data.date.apply(lambda dateString: datetime.strptime(dateString, "%Y-%m-%d").weekday())
data["month"] = data.date.apply(lambda dateString: datetime.strptime(dateString, "%Y-%m-%d").month)

# + [markdown]
# **Random Forest Model To Predict 0's In Windspeed**

# +
from sklearn.ensemble import RandomForestRegressor

# Treat windspeed==0 as "missing" and impute it from the non-zero rows.
dataWind0 = data[data["windspeed"] == 0]
dataWindNot0 = data[data["windspeed"] != 0]
rfModel_wind = RandomForestRegressor()
windColumns = ["season", "weather", "humidity", "month", "temp", "year", "atemp"]
rfModel_wind.fit(dataWindNot0[windColumns], dataWindNot0["windspeed"])

wind0Values = rfModel_wind.predict(X=dataWind0[windColumns])
dataWind0["windspeed"] = wind0Values
data = pd.concat([dataWindNot0, dataWind0], ignore_index=True)

# + [markdown]
# **Coercing To Categorical Type**

# +
categoricalFeatureNames = ["season", "holiday", "workingday", "weather", "weekday", "month", "year", "hour"]
numericalFeatureNames = ["temp", "humidity", "windspeed", "atemp"]
dropFeatures = ['casual', "count", "datetime", "date", "registered"]

# +
for var in categoricalFeatureNames:
    data[var] = data[var].astype("category")

# + [markdown]
# **Splitting Train And Test Data**

# +
# Test rows have NaN in "count"; that is how the split is recovered.
dataTrain = data[pd.notnull(data['count'])].sort_values(by=["datetime"])
dataTest = data[~pd.notnull(data['count'])].sort_values(by=["datetime"])
datetimecol = dataTest["datetime"]
yLabels = dataTrain["count"]
yLablesRegistered = dataTrain["registered"]
yLablesCasual = dataTrain["casual"]

# + [markdown]
# **Dropping Unnecessary Variables**

# +
dataTrain = dataTrain.drop(dropFeatures, axis=1)
dataTest = dataTest.drop(dropFeatures, axis=1)

# + [markdown]
# **RMSLE Scorer**

# +
def rmsle(y, y_, convertExp=True):
    """Root mean squared logarithmic error between targets and predictions.

    Parameters
    ----------
    y : array-like
        Ground-truth values (in log space when ``convertExp`` is True).
    y_ : array-like
        Predicted values (in log space when ``convertExp`` is True).
    convertExp : bool
        Exponentiate both inputs first; use this when the model was trained
        on log1p-transformed labels.

    Returns
    -------
    float
        sqrt(mean((log1p(y) - log1p(y_)) ** 2))
    """
    y = np.asarray(y, dtype=float)
    y_ = np.asarray(y_, dtype=float)
    if convertExp:
        # fix: the original read `y = np.exp(y),` — the stray trailing comma
        # wrapped y in a 1-tuple before the element-wise log below.
        y = np.exp(y)
        y_ = np.exp(y_)
    # log1p(v) == log(v + 1); nan_to_num guards against log of negatives.
    log1 = np.nan_to_num(np.log1p(y))
    log2 = np.nan_to_num(np.log1p(y_))
    return np.sqrt(np.mean((log1 - log2) ** 2))

# + [markdown]
# ## **Linear Regression Model**

# +
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
import warnings

pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Initialize linear regression model
lModel = LinearRegression()

# Train on log1p-transformed labels
yLabelsLog = np.log1p(yLabels)
lModel.fit(X=dataTrain, y=yLabelsLog)

# Make predictions
preds = lModel.predict(X=dataTrain)
print("RMSLE Value For Linear Regression: ", rmsle(np.exp(yLabelsLog), np.exp(preds), False))

# + [markdown]
# ## **Regularization Model - Ridge**

# +
ridge_m_ = Ridge()
ridge_params_ = {'max_iter': [3000],
                 'alpha': [0.1, 1, 2, 3, 4, 10, 30, 100, 200, 300, 400, 800, 900, 1000]}
rmsle_scorer = metrics.make_scorer(rmsle, greater_is_better=False)
grid_ridge_m = GridSearchCV(ridge_m_, ridge_params_, scoring=rmsle_scorer, cv=5)
yLabelsLog = np.log1p(yLabels)
grid_ridge_m.fit(dataTrain, yLabelsLog)
preds = grid_ridge_m.predict(X=dataTrain)
print(grid_ridge_m.best_params_)
print("RMSLE Value For Ridge Regression: ", rmsle(np.exp(yLabelsLog), np.exp(preds), False))

fig, ax = plt.subplots()
fig.set_size_inches(12, 5)
# fix: GridSearchCV.grid_scores_ was removed in scikit-learn 0.20;
# cv_results_ carries the same per-candidate information.
df = pd.DataFrame(grid_ridge_m.cv_results_)
df["alpha"] = df["param_alpha"]
df["rmsle"] = -df["mean_test_score"]
sn.pointplot(data=df, x="alpha", y="rmsle", ax=ax)

# + [markdown]
# ## **Regularization Model - Lasso**

# +
lasso_m_ = Lasso()
alpha = 1 / np.array([0.1, 1, 2, 3, 4, 10, 30, 100, 200, 300, 400, 800, 900, 1000])
lasso_params_ = {'max_iter': [3000], 'alpha': alpha}
grid_lasso_m = GridSearchCV(lasso_m_, lasso_params_, scoring=rmsle_scorer, cv=5)
yLabelsLog = np.log1p(yLabels)
grid_lasso_m.fit(dataTrain, yLabelsLog)
preds = grid_lasso_m.predict(X=dataTrain)
print(grid_lasso_m.best_params_)
print("RMSLE Value For Lasso Regression: ", rmsle(np.exp(yLabelsLog), np.exp(preds), False))

fig, ax = plt.subplots()
fig.set_size_inches(12, 5)
# fix: same grid_scores_ -> cv_results_ migration as for Ridge above.
df = pd.DataFrame(grid_lasso_m.cv_results_)
df["alpha"] = df["param_alpha"]
df["rmsle"] = -df["mean_test_score"]
sn.pointplot(data=df, x="alpha", y="rmsle", ax=ax)

# + [markdown]
# ## **Ensemble Models - Random Forest**

# +
from sklearn.ensemble import RandomForestRegressor
rfModel = RandomForestRegressor(n_estimators=100)
yLabelsLog = np.log1p(yLabels)
rfModel.fit(dataTrain, yLabelsLog)
preds = rfModel.predict(X=dataTrain)
print("RMSLE Value For Random Forest: ", rmsle(np.exp(yLabelsLog), np.exp(preds), False))

# + [markdown]
# ## **Ensemble Model - Gradient Boost**

# +
from sklearn.ensemble import GradientBoostingRegressor
gbm = GradientBoostingRegressor(n_estimators=4000, alpha=0.01)  # Test 0.41
yLabelsLog = np.log1p(yLabels)
gbm.fit(dataTrain, yLabelsLog)
preds = gbm.predict(X=dataTrain)
print("RMSLE Value For Gradient Boost: ", rmsle(np.exp(yLabelsLog), np.exp(preds), False))

# + [markdown]
# **Lets compare the distribution of train and test results. More or less the
# distributions look identical — visual confirmation that the model is not
# predicting badly and is not suffering from major overfitting.**

# +
predsTest = gbm.predict(X=dataTest)
fig, (ax1, ax2) = plt.subplots(ncols=2)
fig.set_size_inches(12, 5)
sn.distplot(yLabels, ax=ax1, bins=50)
sn.distplot(np.exp(predsTest), ax=ax2, bins=50)

# +
submission = pd.DataFrame({
    "datetime": datetimecol,
    "count": [max(0, x) for x in np.exp(predsTest)]
})
submission.to_csv('bike_predictions_gbm_separate_without_fe.csv', index=False)

# + [markdown]
# **The submission will have a test score of 0.41**

# + [markdown]
# ## **Kindly Upvote if You Find It Useful**

# +
Python-Notebooks/.ipynb_checkpoints/BikeSharing-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Dimensionality reduction: Hydrogen mechanism (H2_Li)
# ### Step 1: Zero-dimensional simulation

# +
import sys
import numpy as np
import cantera as ct
import matplotlib.pylab as plt

gas = ct.Solution('/Users/aditya/work/projects/isml/jupyter/mech/H2_Li/H2_Li.cti')

# simulation parameters
nstep = 2500
dtime = 1.e-7

# composition: constant-pressure ignition of a lean H2/O2/N2 mixture
gas.TPY = 1120.0, ct.one_atm*1.2, 'H2:0.00811,O2:0.18316,H2O:0.05182,N2:0.75692'
r = ct.IdealGasConstPressureReactor(gas)

# data matrix (size NT x NV): one row per time step,
# columns = species mass fractions followed by temperature
nt = nstep + 1
nx = nt                    # number of samples (rows of the data matrix)
nsc = gas.Y.size           # number of species
nv = nsc + 1               # state size: mass fractions + temperature
X = np.zeros((nt, nv))     # state history
p = np.zeros((nt))         # pressure history

# simulation object
sim = ct.ReactorNet([r])
time = np.zeros(nt)

# initial condition
for i in range(nsc):
    X[0, i] = gas.Y[i]
X[0, nv-1] = gas.T
p[0] = gas.P

for n in range(nstep):
    time[n+1] = time[n] + dtime
    # integrate the solution in time
    sim.advance(time[n+1])
    # append data to X matrix
    for i in range(nsc):
        X[n+1, i] = gas.Y[i]
    X[n+1, nv-1] = gas.T
    p[n+1] = gas.P
# -

# #### Compute reaction rate

# +
nRR = gas.n_reactions           # number of reactions
RR = np.zeros((nt, nRR))        # reaction-rates matrix

# re-load each stored state into the gas object and record net rates of progress
for it in range(nt):
    gas.TP = X[it, nv-1], p[it]
    gas.Y = X[it, :nsc]
    for ir in range(nRR):
        RR[it, ir] = gas.net_rates_of_progress[ir]

# +
# plot styling
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.figsize'] = (8, 6)
# Get the best of both ggplot and seaborn
plt.style.use('ggplot')
plt.style.use('seaborn-deep')
plt.rcParams['figure.autolayout'] = True

# plt.figure()
# plt.plot(time*1e3, RR[:,0], '-r')
# plt.plot(time*1e3, RR[:,1], '-g')
# plt.plot(time*1e3, RR[:,2], '-b')
# plt.plot(time*1e3, RR[:,3], '-k')
# plt.xlabel('Time (ms)')
# plt.ylabel('Reaction rate');
# -

# ### Step 2: Compute principal values and vectors
# #### Scaling data matrix

# +
def scaleData(nf, v):
    """Normalize the first ``nf`` columns of ``v`` in place.

    Each column is centered by its mean and scaled by its maximum absolute
    value (both computed before centering, as in the original loop).

    Parameters
    ----------
    nf : int
        Number of leading columns (features) to scale.
    v : ndarray, shape (n_samples, >= nf)
        Data matrix; mutated in place.

    Returns
    -------
    None
    """
    v_mean = np.mean(v, axis=0)
    v_max = np.max(np.abs(v), axis=0)
    # vectorized equivalent of the original per-column loop
    v[:, :nf] = (v[:, :nf] - v_mean[:nf]) / v_max[:nf]
    return None


Xs = np.copy(X)     # scaled data matrix
scaleData(nv, Xs)
# -

# #### Co-variance matrix

# +
def covar(u):
    """Second-moment (covariance) matrix of the rows of ``u``.

    Vectorized replacement for the original O(nv^2 * nx) Python loops:
    mom[i, j] = sum_n u[n, i] * u[n, j] / n_samples  ==  (u.T @ u) / n_samples.
    Sample count is taken from ``u`` itself instead of the module-level ``nx``.
    """
    n_samples = u.shape[0]
    return (u.T @ u) / n_samples


mom2 = covar(Xs)
print(mom2)
# -

# #### Co-kurtosis tensor (matricized)

# +
def cokurt(u):
    """Raw fourth-moment (co-kurtosis) tensor of the rows of ``u``.

    einsum replacement for the original five nested loops:
    tmp[i,j,k,l] = sum_n u[n,i]*u[n,j]*u[n,k]*u[n,l] / n_samples.
    """
    n_samples = u.shape[0]
    return np.einsum('ni,nj,nk,nl->ijkl', u, u, u, u) / n_samples


mom4_t = cokurt(Xs)
# matricize: unfold the 4th-order tensor into an (nv, nv^3) matrix
mom4 = np.reshape(mom4_t, (nv, nv*nv*nv), order='F')
# -

# #### Cumulant Excess-kurtosis tensor (matricized)

# +
def excesskurt(u):
    """Fourth cumulant of the rows of ``u``.

    Raw co-kurtosis minus the three pairwise covariance products
    (the Gaussian contribution, per Isserlis' theorem).  The covariance is
    recomputed from ``u`` rather than read from the module-level ``mom2``,
    which is identical for the call below.
    """
    n_samples = u.shape[0]
    k4 = np.einsum('ni,nj,nk,nl->ijkl', u, u, u, u) / n_samples
    c = (u.T @ u) / n_samples
    k4 = k4 - (np.einsum('ij,kl->ijkl', c, c)
               + np.einsum('ik,jl->ijkl', c, c)
               + np.einsum('il,jk->ijkl', c, c))
    return k4


mom42_t = excesskurt(Xs)
mom42 = np.reshape(mom42_t, (nv, nv*nv*nv), order='F')
# -

# #### Principal values and vectors

# +
# factorize co-variance matrix: SVD of the covariance matrix
U2, S2, V2 = np.linalg.svd(mom2)

# factorize co-kurtosis tensor (matricized)
U4, S4, V4 = np.linalg.svd(mom4)

# factorize excess-kurtosis tensor (matricized)
U42, S42, V42 = np.linalg.svd(mom42)
# -

# ### Step 3: write data
# #### Save simulation data

# +
# write mass fractions, temperature, pressure and reaction rates
with open('YTP_H2_Li.npy', 'wb') as fid:
    np.save(fid, X)
    np.save(fid, p)
    np.save(fid, RR)

# with open('YTP_H2_Li.npy', 'rb') as fid1:
#     xx = np.load(fid1)
#     p = np.load(fid1)

# +
# principal values and principal vectors
# second moment
with open('PV2_H2_Li.npy', 'wb') as fid2:
    np.save(fid2, S2)
    np.save(fid2, U2)

# raw fourth moment
with open('PV4_H2_Li.npy', 'wb') as fid4:
    np.save(fid4, S4)
    np.save(fid4, U4)

# cumulant kurtosis moment
with open('PV42_H2_Li.npy', 'wb') as fid42:
    np.save(fid42, S42)
    np.save(fid42, U42)
# -

# #### Analyze principal values and vectors

S2

S42

S4

# +
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
f1 = ax1.plot(S2)
ax1.set_yscale('log')
f1 = ax2.plot(S4)
ax2.set_yscale('log')
f1 = ax3.plot(S42)
ax3.set_yscale('log')
# -

# NOTE(review): the hard-coded lists below overwrite the S2/S42 spectra
# computed above — presumably values saved from a reference run; confirm
# before relying on the final comparison plot.
S2 = [5.07467661e-01, 1.36763511e-01, 4.14087013e-02, 7.89197613e-03,
      6.18416176e-04, 7.75357510e-05, 1.24521417e-09, 6.42072612e-17,
      2.55488515e-17, 4.15679670e-28]

S42 = [4.17445833e-01, 8.57624542e-02, 4.19845897e-02, 9.19474360e-03,
       3.98418179e-03, 3.34678663e-04, 2.64483513e-06, 8.46442276e-16,
       4.12051088e-16, 2.07629870e-16]

# +
import matplotlib.pyplot as plt

plt.yscale('log')
plt.plot(S2, 'r')
plt.plot(S42, 'b--')
# -
examples/H2_Li/PCs_H2_Li.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # # <center> SINGLETON </center> # + class SingletonTest: __instance = None def __init__(self, x, y): """ Valida que no se genere otro Objeto del mismo tipo con el Constructor.""" if SingletonTest.__instance != None: raise Exception("Esta clase genera Objetos de tipo Singleton, ya existe un Objeto Instanciado de este tipo!") else: SingletonTest.__instance = self self.x = x self.y = y @staticmethod def getInstance(x, y): """ Se genera una Instancia de la clase desde este metodo.""" if SingletonTest.__instance == None: SingletonTest(x, y) return SingletonTest.__instance s = SingletonTest(1,2) print (s.__dict__) s = SingletonTest.getInstance(3,4) print (s.__dict__) s = SingletonTest.getInstance(6,7) print (s.__dict__) SingletonTest(1,2) # - # # <center> FACTORY </center> # + from __future__ import annotations from abc import ABC, abstractmethod class IColor(ABC): @abstractmethod def mostrar(self): """ Note that the Creator may also provide some default implementation of the factory method. 
""" pass class ColorAmarillo(IColor): def mostrar(self) -> Color: return "SOY EL COLOR AMARILLO" class ColorAzul(IColor): def mostrar(self) -> Color: return "SOY EL COLOR AZUL" class ColorRojo(IColor): def mostrar(self) -> Color: return "SOY EL COLOR ROJO" class FabricaColores: @staticmethod def crear(color): """Factory Method""" objects = { "Amarillo": ColorAmarillo, "Azul": ColorAzul, "Rojo": ColorRojo, } if color in objects: print(f"{objects[color]().__class__} Factory de color {color} Valido") return objects[color]() raise AssertionError(f"Color {color} No Valido") class IFiguras(ABC): @abstractmethod def mostrar(self): """ Note that the Creator may also provide some default implementation of the factory method. """ pass class FiguraCircular(IFiguras): def mostrar(self) -> Figura: return "SOY EL CIRCULO" class FiguraCuadrada(IFiguras): def mostrar(self) -> Figura: return "SOY EL CUADRADO" class FiguraTriangular(IFiguras): def mostrar(self) -> Figura: return "SOY EL TRIANGULO" class FabricaFiguras: @staticmethod def crear(figura): """Factory Method""" objects = { "Circulo": FiguraCircular, "Cuadro": FiguraCuadrada, "Triangulo": FiguraTriangular, } if figura in objects: print(f"{objects[figura]().__class__} Factory de Figura {figura} Valido") return objects[figura]() raise AssertionError(f"figura {figura} No Valido") # - z = FabricaColores.crear("Amarillo") z.mostrar() z = FabricaFiguras.crear("Triangulo") z.mostrar() # # <center> ABSTRACT FACTORY </center> class IAbstractFactory(ABC): "Abstract Factory Interface" @staticmethod @abstractmethod def create_object(factory): "The static Abstract factory interface method" class AbstractFactory(IAbstractFactory): "The Abstract Factory Concrete Class" @staticmethod def create_factory_object(factory_type, object_type): "Static get_factory method" objects = { "Colores": { "fabrica":FabricaColores, "Amarillo": ColorAmarillo, "Azul": ColorAzul, "Rojo": ColorRojo }, "FabricaFiguras":{ "Circulo": FiguraCircular, "Cuadro": 
FiguraCuadrada, "Triangulo": FiguraTriangular } } if factory_type in objects: print(f"{objects[factory_type]} Abstract Factory {factory_type} Valido") #breakpoint() if object_type in objects[factory_type]: #breakpoint() print(f"{object_type.__class__} Factory {object_type} Valido") return objects[factory_type]["fabrica"]().crear(object_type) raise AssertionError(f"factory {object_type} No Valido") raise AssertionError(f"Abstract factory {factory_type} No Valido") z = AbstractFactory.create_factory_object('Colores',"Azul") z.mostrar() class AbstractFactory(IAbstractFactory): "The Abstract Factory Concrete Class" @staticmethod def create_factory_object(object_type): "Static get_factory method" objects = { "Colores": { "fabrica":FabricaColores, "Amarillo": ColorAmarillo, "Azul": ColorAzul, "Rojo": ColorRojo }, "Figuras":{ "fabrica":FabricaFiguras, "Circulo": FiguraCircular, "Cuadro": FiguraCuadrada, "Triangulo": FiguraTriangular } } object_return = [objects[x]['fabrica']() for x in objects if object_type in objects[x]] if object_return: print(f"el Objeto {object_type} Pertenece a la Abstract Factory {object_return[0].__class__}") return object_return[0].crear(object_type).mostrar() else: raise AssertionError(f"factory {object_type} No Valido") AbstractFactory.create_factory_object('Circulo') {}
Python/Notebooks/Patrones de Diseno.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## BP 2004 model # # <NAME>., & <NAME>. (2005, June). The 2004 BP velocity benchmark. In 67th EAGE Conference & Exhibition (pp. cp-1). European Association of Geoscientists & Engineers. # # The raw data can be downloaded from [this website](https://wiki.seg.org/wiki/2004_BP_velocity_estimation_benchmark_model#Madagascar_processing_scripts%7C) # + tags=[] import numpy as np import matplotlib.pyplot as plt import scipy from scipy.ndimage.filters import gaussian_filter from scipy.interpolate import interp2d import scipy.signal import scipy.io import obspy import os import seaborn as sns sns.set_theme() # + tags=[] in_dir = "./models_raw/" out_dir = "./models" if not os.path.exists(out_dir): os.makedirs(out_dir) # + tags=[] def calc_dx_dt(f0, v): dx = v * 1/f0 / 10 dy = dx dt = dx / v / 3 return dx, dt # + tags=[] def ricker(f, dt): # nt = np.around(10/f/dt) # src = scipy.signal.ricker(nt, a=1/f0/dt) T = np.sqrt(20) / (np.pi * f) t = np.arange(-T, T, dt) src = (1-2*np.pi**2*f**2*t**2)*np.exp(-np.pi**2*f**2*t**2) * 1e4 return src # + tags=[] def extract_data(meta): data = [] for trace in meta: data.append(trace.data) return np.array(data) # + tags=[] def load_BP_model(): if not os.path.exists(os.path.join(in_dir, "vel_z6.25m_x12.5m_exact.segy.gz")): os.system("wget {} -P {}".format("http://s3.amazonaws.com/open.source.geoscience/open_data/bpvelanal2004/vel_z6.25m_x12.5m_exact.segy.gz", in_dir)) meta = obspy.read(os.path.join(in_dir, "vel_z6.25m_x12.5m_exact.segy.gz"), format='segy') vp = extract_data(meta) * 1e3 #m/s^2 nx, ny = vp.shape dx = 12.5 dy = 6.25 x = np.arange(nx) * dx y = np.arange(ny) * dy model = {"vp": vp/1e3, "x": x, "y":y, "dx":dx, "dy":dy, "nx":nx, "ny":ny} return model BP_model = load_BP_model() # + tags=[] plt.figure(figsize=(10,5)) 
plt.pcolormesh(BP_model["x"][::10], BP_model["y"][::10], BP_model["vp"][::10,::10].T, shading='auto', cmap="jet") plt.axis("scaled") plt.gca().invert_yaxis() plt.colorbar(shrink=0.3) plt.show() # + tags=[] def resample_model(x, y, model): ratio_x = (x[1] - x[0])/(model['x'][1] - model['x'][0]) ratio_y = (y[1] - y[0])/(model['y'][1] - model['y'][0]) ratio = 3 vp = gaussian_filter(model['vp'], [ratio_x/ratio, ratio_y/ratio], mode='reflect') vp = interp2d(model['y'], model['x'], vp, kind='linear')(y, x) new_model = {"vp":vp, "x":x, "y":y} new_model['dx'] = x[1] - x[0] new_model['dy'] = y[1] - y[0] return new_model # + tags=[] f0 = 1.2 vp0 = np.max(BP_model['vp']) dx, dt = calc_dx_dt(f0*3, vp0) dy = dx n_pml = 10 x_range = [0, 25000] y_range = [0, 9000] t_range = [0, np.sqrt((x_range[1] - x_range[0])**2 + (y_range[1] - y_range[0])**2)/np.mean(BP_model['vp']) * 1.5] x = np.arange(x_range[0], x_range[1], dx) y = np.arange(y_range[0], y_range[1], dy) t = np.arange(t_range[0], t_range[1], dt) nx = len(x) ny = len(y) nt = len(t) src_rcv_depth = n_pml + 2 mask_extra_detph = 3 print(f"nx = {nx}, ny = {ny}, nt = {nt}") print(f"dx = {dx:.2f} m, dy = {dy:.2f} m, dt = {dt:.2e} s") # + tags=[] model = resample_model(x, y, BP_model) # + tags=[] vec = np.zeros(nt) vec_ricker = ricker(f0, dt) # vec_ricker = gaussian(f0, dt) vec[:len(vec_ricker)] = vec_ricker[:len(vec)] t = np.arange(len(vec))*dt f_vec = np.fft.fft(vec) f = np.fft.fftfreq(len(vec), dt) # sos = scipy.signal.butter(8, 6, "hp", fs=np.round(1/dt), output='sos') # # sos = scipy.signal.cheby1(4, 0.1, 6, "hp", fs=np.round(1/dt), output='sos') # filt_vec = scipy.signal.sosfilt(sos, vec) # filt_vec = scipy.signal.sosfilt(sos, filt_vec[::-1])[::-1] # filt_f_vec = np.fft.fft(filt_vec) # filt_f = np.fft.fftfreq(len(filt_vec), dt) plt.figure(figsize=(11,2.5)) plt.subplot(121) plt.plot(t, vec/1e6, label="Original Ricker") # plt.plot(t, filt_vec/1e6, "--", label="6Hz High-pass Filtering") plt.xlim([0, 2.0]) plt.xlabel("Time (s)") 
plt.ylabel("Amplitude") plt.legend(loc="upper right") plt.gca().ticklabel_format(axis="y",style="sci", scilimits=(0.0,0.0)) plt.text(0.05, 0.95, "(a)", horizontalalignment='left', verticalalignment='top', transform=plt.gca().transAxes, fontsize=16) plt.subplot(122) plt.plot(f[:len(f)//2], np.abs(f_vec)[:len(f)//2]/1e6, label="Original Ricker") # plt.plot(f[:len(f)//2], np.abs(filt_f_vec)[:len(filt_f)//2]/1e6, "--", label="6Hz High-pass Filtering") # plt.plot(plt.xlim(), [0, 0], '--') plt.xlim([0, 20]) plt.xticks(range(0,18+1,2)) plt.xlabel("Frequency (Hz)") plt.ylabel("Amplitude") plt.grid("major") plt.legend(loc="upper right") plt.text(0.05, 0.95, "(b)", horizontalalignment='left', verticalalignment='top', transform=plt.gca().transAxes, fontsize=16) # plt.savefig("source.png", bbox_inches="tight") # plt.savefig("source.pdf", bbox_inches="tight") plt.show() # + tags=[] plt.figure(figsize=(10,10)) plt.subplot(211) plt.pcolormesh(BP_model["x"][::10], BP_model["y"][::10], BP_model["vp"][::10,::10].T, shading='auto', cmap="jet") plt.axis("scaled") plt.xlim(x_range) plt.ylim(y_range) plt.gca().invert_yaxis() plt.colorbar(shrink=0.5) plt.subplot(212) plt.pcolormesh(model["x"], model["y"], model["vp"].T, shading='auto', cmap="jet") plt.axis("scaled") plt.gca().invert_yaxis() plt.colorbar(shrink=0.5) plt.show() # + tags=[] def add_source(nsrc, nx, nt, dt, f0=5, n_pml=0): src_list = [] src_ix = np.round(np.linspace(0+2*n_pml, nx-2*n_pml, nsrc)).astype('int') print(f"Number of sources: {len(src_ix)}; Interval between sources: {(src_ix[1]-src_ix[0])*dx:.1f} m") src_iy = np.ones_like(src_ix) * src_rcv_depth src_vec = np.zeros(nt) vec = ricker(f0, dt) src_vec[:len(vec)] = vec[:nt] for i in range(nsrc): tmp = {"ix": src_ix[i], "iy": src_iy[i], "type": 0, "vec": src_vec} src_list.append(tmp) return src_list nsrc = 8 source_list = add_source(nsrc, nx, nt, dt, f0, n_pml) # + tags=[] def add_receiver(nrcv, nsrc, nx, n_pml=0, full=False): rcv = {} if full: rcv['ix'] = 
np.arange(n_pml, nx-n_pml+1).astype('int') else: rcv['ix'] = np.round(np.linspace(0+n_pml, nx-n_pml, nrcv)).astype('int') rcv['iy'] = np.ones_like(rcv['ix']) * src_rcv_depth rcv['type'] = np.zeros_like(rcv['ix']) print(f"Number of receivers: {len(rcv['ix'])}; Interval of receivers: {(rcv['ix'][1]-rcv['ix'][0])*dx:.1f} m") rcv_list = [rcv] * nsrc return rcv_list nrcv = int(nx*dx / (int(vp0/f0)/8)) receiver_list = add_receiver(nrcv, nsrc, nx, n_pml, True) # + tags=[] def get_smooth_model(model, src): if mask_extra_detph > 0: vp = model['vp'].copy() vp[:,src_rcv_depth+mask_extra_detph:] = gaussian_filter(model['vp'][:, src_rcv_depth+mask_extra_detph:], [4,4], mode='reflect') else: vp = model['vp'].copy() vp = gaussian_filter(model['vp'], [4,4], mode='reflect') vp = np.mean(vp, axis=0, keepdims=True).repeat(nx, axis=0) # vp = np.mean(model['vp'], axis=0, keepdims=True).repeat(nx, axis=0) mask = np.ones_like(vp) if mask_extra_detph > 0: mask[:,:src_rcv_depth+mask_extra_detph]=0 vp[:,:src_rcv_depth+mask_extra_detph] = model['vp'][:,:src_rcv_depth+mask_extra_detph] new_model = {"vp":vp, "x":x, "y":y, "dx":dx, "dy":dy, "mask":mask} return new_model smooth_model = get_smooth_model(model, source_list[0]) # + tags=[] plt.figure() plt.plot(t, source_list[0]['vec']) plt.xlabel('Time (s)') plt.ylabel('Amplitude') plt.title("Source time function") plt.show() # + tags=[] plt.figure() plt.plot(model['vp'][nx//2,:]) plt.plot([src_rcv_depth, src_rcv_depth], plt.ylim()) # + tags=[] plt.figure(figsize=(10,7)) x_mesh, y_mesh = np.meshgrid(x, y) x0 = x[0]/1e3 plt.pcolormesh(x_mesh/1e3-x0, y_mesh/1e3, model['vp'].T/1e3, shading='auto', cmap="jet") plt.plot(x[receiver_list[0]['ix']]/1e3-x0, y[receiver_list[0]['iy']]/1e3, 'wv', alpha=0.5) for i in range(len(source_list)): plt.plot(x[source_list[i]['ix']]/1e3-x0, y[source_list[i]['iy']]/1e3, 'r*') plt.xlabel("x (km)") plt.ylabel("z (km)") plt.gca().invert_yaxis() plt.axis('scaled') plt.colorbar(shrink=0.5) plt.savefig(os.path.join(out_dir, 
"BP-model-vp.png")) plt.show() # + tags=[] plt.figure(figsize=(10,7)) x_mesh, y_mesh = np.meshgrid(x, y) x0 = x[0]/1e3 plt.pcolormesh(x_mesh/1e3-x0, y_mesh/1e3, smooth_model['vp'].T/1e3, shading='auto', cmap="jet") # plt.plot(x[receiver_list[0]['ix']]/1e3-x0, y[receiver_list[0]['iy']]/1e3, 'wv', alpha=0.5) # for i in range(len(source_list)): # plt.plot(x[source_list[i]['ix']]/1e3-x0, y[source_list[i]['iy']]/1e3, 'r*') plt.xlabel("x (km)") plt.ylabel("z (km)") plt.gca().invert_yaxis() plt.axis('scaled') plt.colorbar(shrink=0.5) plt.savefig(os.path.join(out_dir, "BP-model-smooth-vp.png")) plt.show() # + tags=[] plt.figure(figsize=(20,7)) plt.subplot(121) x_mesh, y_mesh = np.meshgrid(x, y) x0 = x[0]/1e3 vmax = np.max(np.abs((model['vp'] - smooth_model['vp']).T/1e3)) print(np.max(model['vp'] - smooth_model['vp'])/1e3, np.min(model['vp'] - smooth_model['vp'])/1e3) plt.pcolormesh(x_mesh/1e3-x0, y_mesh/1e3, (model['vp'] - smooth_model['vp']).T/1e3, cmap="seismic", vmax=vmax, vmin=-vmax, shading='auto') # plt.plot(x[receiver_list[0]['ix']]/1e3-x0, y[receiver_list[0]['iy']]/1e3, 'wv', alpha=0.5) for i in range(len(source_list)): plt.plot(x[source_list[i]['ix']]/1e3-x0, y[source_list[i]['iy']]/1e3, 'r*') plt.xlabel("x (km)") plt.ylabel("z (km)") plt.gca().invert_yaxis() plt.axis('scaled') plt.colorbar(shrink=0.5) plt.subplot(122) plt.pcolormesh(x_mesh/1e3-x0, y_mesh/1e3, smooth_model["mask"].T, shading='auto') plt.xlabel("x (km)") plt.ylabel("z (km)") plt.gca().invert_yaxis() plt.axis('scaled') plt.colorbar(shrink=0.5) plt.savefig(os.path.join(out_dir, "BP-model-diff-vp.png")) plt.show() # + tags=[] scipy.io.savemat(os.path.join(out_dir, 'BP-model-true.mat'), {"vp" : model['vp'], "source" : source_list, "receiver" : receiver_list, "dx" : dx, "dy" : dy, "dt" : dt, "nx" : nx, "ny" : ny, "nt" : nt, "f0" : f0}) # + tags=[] scipy.io.savemat(os.path.join(out_dir, 'BP-model-smooth.mat'), {"vp" : smooth_model['vp'], "mask": smooth_model["mask"], "source" : source_list, "receiver" : 
receiver_list, "dx" : dx, "dy" : dy, "dt" : dt, "nx" : nx, "ny" : ny, "nt" : nt, "f0" : f0}) # -
examples/nn_fwi/BP2004_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Contours # **Displaying Contours of an image** # # *Read and store the image* # # *Obtain the HSV image or Grayscale image of the loaded image* # # *Obtain the contoural edges of the image* # # *Find the contoural points* # # ***Draw the contour of the image*** #Import cv2 and numpy libraries import cv2 import numpy as np #Read and load the image to a variable img = cv2.imread('extras/6_squares') #Convert the image to HSV hsv = cv2.cvtColor(img , cv2.COLOR_BGR2HSV) #Find the contoural edges in the image cedges = cv2.Canny(hsv , 30 , 200) #Display the image of edges cv2.imshow("Edged Image" , cedges) cv2.waitKey(0) #Find the contoural points of the edges contours , heirarchy = cv2.findContours(cedges , cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_NONE) #Draw the contours cv2.drawContours(img , contours , -1 , (0 , 255 , 0) , 3) cv2.waitKey(0) cv2.imshow('Contours' , img) cv2.waitKey(0) #Destroy all windows cv2.destroyAllWindows() # **Displaying contours of a video of the webcam** # # *Activate the webcam* # # *Extract individual frames* # # *Convert frames to either hsv or grayscale* # # *Obtain the contoural edges of the image* # # *Find the contoural points* # # *If area of the contoural area is greater than a threshold, draw the contours* # # ***Draw the contour of individual frames so that it results in the contours of the video*** # # *Release the capture* # #Import cv2 and numpy libraries import cv2 import numpy as np # + #Video through the webcam cap = cv2.VideoCapture(0) while True: #Capture the frames from the video ret , frame = cap.read() #Convert each frame to hsv hsv = cv2.cvtColor(frame , cv2.COLOR_BGR2HSV) #Find the contoural edges in the image cedges = cv2.Canny(hsv , 30 , 200) #Obtain the contours contours , heirarchy = 
cv2.findContours(cedges , cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_NONE) #Iterate over each contour co-ordinate for contour in contours: #Find the area of the contour area = cv2.contourArea(contour) #Take into consideration the contour only if the area is greater than 850 so that noises are rejected if area > 850: cv2.drawContours(frame , contours , -1 , (0 , 255 , 0) , 3) #Display contour of the frame that is captured cv2.imshow("Contours" , frame) #Wait for user to press 'q' if cv2.waitKey(1) & 0xFF == ord('q'): #break the loop for displaying frames break #Stop capturing cap.release() #Destroy all windows cv2.destroyAllWindows()
Sathyashree_OpenCV_Contours.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.6 64-bit (''pytorch_env'': conda)' # metadata: # interpreter: # hash: 2b4c32494cf96406473b0631777d834036a5c9135d102aa663783fcd40faeb28 # name: python3 # --- import pandas as pd import numpy as np import unicodedata import string from __future__ import unicode_literals, print_function, division from io import open import glob import os from utils import * from sklearn.model_selection import train_test_split df = pd.read_csv("data/baby_names.csv") df = df.rename(columns = {"Child's First Name":"Name"}) df["Name"] = df["Name"].apply(str.lower) df (df["Name"] == "sophia").values.astype(int).sum() df.groupby(["Name","Ethnicity"]).mean() np.unique(df["Ethnicity"].values) def replace_ethnicity(name): if("BLACK" in name): return "AFRICAN" if("ASIAN" in name): return "ASIAN" if("HISP" in name): return "HISPANIC" return "UNWKNOWN" np.unique(df["Ethnicity"].apply(replace_ethnicity).values) df["Ethnicity"] = df["Ethnicity"].apply(replace_ethnicity) df # + tags=[] dfg = df.groupby(["Name","Ethnicity"]).sum().reset_index() count_total = dfg.groupby('Name')["Count"].sum().rename("Total").reset_index() count_total # - df_total = df.groupby(["Name","Ethnicity"]).sum().reset_index().merge(count_total) df_total["Count"] = df_total["Count"] / df_total["Total"] df_total[df_total.Count > 0.50]["Count"].hist() df_f = df_total[df_total.Count > 0.50][["Ethnicity","Name"]] df_f train_test_split(df_f) train, test = train_test_split(df_f) train.to_csv("data/train.csv") test.to_csv("data/test.csv") # !cat data/test.csv # # Deuxième dataset # + all_letters = string.ascii_letters + " .,;'" n_letters = len(all_letters) category_lines = {} all_categories = [] for filename in findFiles('data/names/*.txt'): category = os.path.splitext(os.path.basename(filename))[0] all_categories.append(category) lines = 
readLines(filename) category_lines[category] = lines n_categories = len(all_categories) # - L = { "Name":[], "Class":[] } for k in category_lines: for name in category_lines[k]: L["Name"].append(name) L["Class"].append(k) df = pd.DataFrame(L) df train, test = train_test_split(df) train.to_csv("data/train2.csv") test.to_csv("data/test2.csv")
Projects/Name_Classification/ETL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''base'': conda)'
#     metadata:
#       interpreter:
#         hash: dca0ade3e726a953b501b15e8e990130d2b7799f14cfd9f4271676035ebe5511
#     name: python3
# ---

# # Lab Three
# ---
#
# For this lab we're going to be making and using a bunch of functions.
#
# Our Goals are:
# - Searching our Documentation
# - Using built in functions
# - Making our own functions
# - Combining functions
# - Structuring solutions

# For the following built-in functions we didn't touch on them in class.
# Look them up in the Python documentation and implement them.

# SWAP CASE on a string, e.g. "HeY thERe HowS iT GoING" -> "hEy THerE hOWs It gOing"
sample_string = "HeY thERe HowS iT GoING"
print(sample_string.swapcase())

# CENTER a string, padding each side with 4 dashes: "Hey There" -> "----Hey There----"
sample_string = "Hey There"
print(sample_string.center(17, "-"))

# PARTITION a string: "abcdefg.hijklmnop" -> ('abcdefg', '.', 'hijklmnop')
sample_string = "abcdefg.hijklmnop"
print(sample_string.partition("."))


def power(number, exponent) -> int:
    """Return number raised to exponent, e.g. power(2, 3) == 8."""
    return number ** exponent


answer = power(4, 4)
print(answer)

# Count how many times a given number appears in a list,
# e.g. 2 occurs 3 times in [2,3,5,2,3,6,7,8,2].
array = [2, 3, 5, 2, 3, 6, 7, 8, 2]


def number_counter(array, target):
    """Return the number of occurrences of target in array."""
    # sum() over a generator replaces the manual counter loop.
    return sum(1 for number in array if number == target)


answer = number_counter(array, 2)
print(answer)


# Use the functions given to create a slope function: (y2 - y1) / (x2 - x1).
def division(x, y):
    """Return x divided by y."""
    return x / y


def subtraction(x, y):
    """Return x minus y."""
    return x - y


def slope(y1, y2, x1, x2):
    """Return the slope of the line through (x1, y1) and (x2, y2)."""
    return division(subtraction(y2, y1), subtraction(x2, x1))


answer = slope(1, 2, 1, 2)
print(answer)

# Use the functions given to create a distance function:
# sqrt((x2 - x1)^2 + (y2 - y1)^2).
import math


def addition(x, y):
    """Return x plus y."""
    return x + y


def distance(x1, x2, y1, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    x_side = power(subtraction(x2, x1), 2)
    y_side = power(subtraction(y2, y1), 2)
    combined_sides = addition(x_side, y_side)
    return math.sqrt(combined_sides)


print(distance(1, 2, 1, 2))
JupyterNotebooks/Labs/Lab 3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# ## Create some toy data
# Three interleaved spiral classes of 100 samples each (not linearly separable).
r = np.tile(np.r_[0:1:100j], 3)
t = np.r_[0:np.pi*4:300j] + np.random.rand(300)
x_train = np.c_[r*np.sin(t), r*np.cos(t)]
y_train = np.arange(3).repeat(100)
plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap=plt.cm.Paired)


# ## Define a Neural Network model
class NeuralNet(nn.Module):
    """Two-layer MLP: input -> hidden (ReLU) -> class logits."""

    def __init__(self, input_size, hidden_size, num_class):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_class)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        return out


# BUG FIX: predict/plot_decision_boundary were originally defined *after*
# their first call, which raises NameError when the notebook is executed
# top to bottom. They are now defined before the training loop.
def predict(m, X):
    """Return the argmax class index for each row of X (ndarray or tensor)."""
    if isinstance(X, np.ndarray):
        X = torch.from_numpy(X.astype(np.float32))
    return torch.max(m(X), 1)[1]


def plot_decision_boundary(x_train, y_train, model):
    """Plot the model's decision regions over the training data."""
    x1_min, x2_min = x_train.min(0) - 0.5
    x1_max, x2_max = x_train.max(0) + 0.5
    x1, x2 = np.meshgrid(np.arange(x1_min, x1_max, 0.01),
                         np.arange(x2_min, x2_max, 0.01))
    y_pred = predict(model, np.c_[x1.ravel(), x2.ravel()])
    plt.pcolormesh(x1, x2, y_pred.data.numpy().reshape(x1.shape), cmap=plt.cm.Paired)
    plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolors='k',
                cmap=plt.cm.Paired)
    plt.show()


model = NeuralNet(2, 128, 3)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(1, 10001):
    y_pred_score = model(torch.from_numpy(x_train.astype(np.float32)))
    loss = loss_fn(y_pred_score, torch.from_numpy(y_train.astype(np.int64)))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 1000 == 1:
        print(f'Loss: {loss} at epoch: {epoch}')

plot_decision_boundary(x_train, y_train, model)

plot_decision_boundary(x_train, y_train, model)


class NeuralNet2(nn.Module):
    """Three-layer MLP variant: two hidden ReLU layers before the logits."""

    def __init__(self, input_size, hidden_layer, num_classes):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_layer)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_layer, hidden_layer)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_layer, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        out = self.fc3(out)
        return out


model = NeuralNet2(2, 64, 3)
backup/.ipynb_checkpoints/pytorch_basics_nn_2018_05_16-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- s = [] L = ['a', 'b', 'c' , 'd'] #set of star actors in 10 first result of search engine for i in range(0 , len(L)) : for j in range(i+1,len(L)) : s.append(list((L[i],L[j]))) #make a nested list with all possible dual combination s m1 = ['a', 'c', 'f', 'g'] m2 = ['j' , 'a', 'b' , 'd'] counter = 0 for i in s : print(all(elem in m2 for elem in i )) edge = [] counter = 0 for i in s : #for each dual combination for j in range(len(df.index)): # search in the whole dataset if(all(elem in df.iloc[j]["starring"] for elem in i )) : #if both elements of the combination was in the document counter = counter + 1 if (counter == 2 ): # we want them to be together in at least 2 movies edge.append(tuple(i)) break edge = [] for i in s : edge.append(tuple(i)) edge import networkx as nx import matplotlib.pyplot as plt G = nx.Graph() G.add_nodes_from(L) G.add_edges_from(edge) nx.draw(G, with_labels = 1) plt.savefig("path_graph1.png") plt.show()
bonus.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Python statistics essential training - 04_02_confidenceintervals

# Standard imports
import math
import io

# +
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as pp
# %matplotlib inline
# -

import scipy.stats
import scipy.optimize
import scipy.spatial

poll = pd.read_csv('poll.csv')
poll.info()

poll.head()

poll.vote.value_counts(normalize=True)


def sample(brown, n=1000):
    """Simulate one poll of n voters where P(vote == 'Brown') is `brown`."""
    # BUG FIX: the original drew np.random.rand(1000) regardless of n, so the
    # sample-size parameter was silently ignored (e.g. samplingdlist(.5, 10000)
    # still simulated polls of 1000).
    return pd.DataFrame({'vote': np.where(np.random.rand(n) < brown, 'Brown', 'Green')})


s = sample(0.51, n=1000)
s.vote.value_counts(normalize=True)

dist = pd.DataFrame((sample(0.51).vote.value_counts(normalize=True) for i in range(1000)))
dist.head()

dist.Brown.hist(histtype='step', bins=20);


def samplingdlist(brown, n=1000):
    """Sampling distribution: vote proportions over 1000 simulated polls of size n."""
    return pd.DataFrame((sample(brown, n).vote.value_counts(normalize=True) for i in range(1000)))


def quantiles(brown, n=1000):
    """95% interval (2.5th and 97.5th percentiles) of the simulated Brown share."""
    dist = samplingdlist(brown, n)
    return dist.Brown.quantile(0.025), dist.Brown.quantile(0.975)


quantiles(0.50)

quantiles(0.49)

quantiles(0.48)

quantiles(0.54)

dist = samplingdlist(0.50, 10000)
dist.Brown.hist(histtype='step')

largepoll = pd.read_csv('poll-larger.csv')
largepoll.vote.value_counts(normalize=True)
Python Statistics/Exercise Files/chapter4/04_02/04_02_confidenceintervals_begin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pyspark Hive Playground # ## A playground for enthusiasts # # Author: GGordon (https://github.com/gggordon) # ## Utility Functions # + def run_process(command): """ Runs a process on the command line """ import subprocess command = str(command) command = command.split(" ") try: result = subprocess.run(command, stdout=subprocess.PIPE, stderr = subprocess.PIPE) output = result.stdout.decode("utf8") return { "output": None if len(output.strip()) == 0 else output , "error":result.stderr.decode("utf8") } except Exception as e: return {"output":None,"error":str(e)} def pfile_exists(file_path): return run_process("ls {}".format(file_path))["output"] is not None def print_process_output(process_output, separator=None): """ Prints output of `run_process` params: process_output: dict with keys "output" and "error" separator: str , marker to separate output and error """ if process_output is not None: print( process_output["output"] if "output" in process_output else "") print("-"*32 if separator is None else separator) print( process_output["error"] if "error" in process_output else "") ppo = print_process_output # - # ## Install Pyspark if run_process("which pyspark")["output"] is None: print("Installing Pyspark...") result = run_process("pip install pyspark") ppo(result) else: print("Pyspark already installed") # ## Retrieve retail db files as json # + # Uncomment Below to get some retail data # # !mkdir -p data/retaildbjson # # !wget -O data/retaildbjson/products.json https://github.com/dgadiraju/data/raw/master/retail_db_json/products/part-r-00000-158b7037-4a23-47e6-8cb3-8cbf878beff7 # # !wget -O data/retaildbjson/orders.json https://github.com/dgadiraju/data/raw/master/retail_db_json/orders/part-r-00000-990f5773-9005-49ba-b670-631286032674 # # !wget 
-O data/retaildbjson/orderitems.json https://github.com/dgadiraju/data/raw/master/retail_db_json/order_items/part-r-00000-6b83977e-3f20-404b-9b5f-29376ab1419e # # !wget -O data/retaildbjson/departments.json https://github.com/dgadiraju/data/raw/master/retail_db_json/departments/part-r-00000-3db7cfae-3ad2-4fc7-88ff-afe0ec709f49 # # !wget -O data/retaildbjson/customers.json https://github.com/dgadiraju/data/raw/master/retail_db_json/customers/part-r-00000-70554560-527b-44f6-9e80-4e2031af5994 # # !wget -O data/retaildbjson/categories.json https://github.com/dgadiraju/data/raw/master/retail_db_json/categories/part-r-00000-ce1d8208-178d-48d3-bfb2-1a97d9c05094 # - # # Analytics Using Spark from pyspark import SparkConf, SparkContext from pyspark.sql import SparkSession spark_master = "local[*]" sqlContext = SparkSession.builder.appName("SCHAD Data Profiling") if spark_master: sqlContext = sqlContext.master(spark_master) sqlContext = sqlContext.getOrCreate() sqlContext from IPython.display import display as idisplay sqlContext.sql(""" SELECT color from ( SELECT * from VALUES ("Red"),("Blue"), ("Green"), ("Yellow") as Colors (color) ) colors """).show()
pyspark-hive-playground.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dataset Creation <a class="tocSkip"> # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Defining-input-data" data-toc-modified-id="Defining-input-data-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Defining input data</a></span></li><li><span><a href="#Choosing-and-configuring-degradations" data-toc-modified-id="Choosing-and-configuring-degradations-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Choosing and configuring degradations</a></span></li><li><span><a href="#Excerpt-length" data-toc-modified-id="Excerpt-length-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Excerpt length</a></span></li><li><span><a href="#Reformatted-data---piano-roll-and-command" data-toc-modified-id="Reformatted-data---piano-roll-and-command-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Reformatted data - piano roll and command</a></span></li><li><span><a href="#Cleaning-up-and-specifying-output-directory" data-toc-modified-id="Cleaning-up-and-specifying-output-directory-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Cleaning up and specifying output directory</a></span></li><li><span><a href="#Reproducibility" data-toc-modified-id="Reproducibility-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Reproducibility</a></span></li><li><span><a href="#Help" data-toc-modified-id="Help-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Help</a></span></li></ul></div> # - # We provide tools to create your own ACME datasets. 
You can: # * Use your own midi or csv data, or pick from our configured data to automatically download # * Choose what types of degradation to include, and supply parameters for them # * Define how long the excerpts taken should be # * Create re-formatted, compressed representations of the data # * Ensure reproducibility # # If you want to degrade data on-the-fly, we also provide a `Degrader()` class which can be used in conjunction with a dataloader. This is described in a subsequent notebook [04_data_parsers_and_degrader.ipynb](./04_data_parsers_and_degrader.ipynb). # # In this notebook, we provide some example calls. # # Defining input data # We have three datasets which will automatically download if specified: `PPDDSep2018Monophonic`, `PPDDSep2018Polyphonic`, and `PianoMidi`. The default is to download and use them all. To not download them, set the `--datasets` argument to `None`. # # This command uses default parameters to create an acme dataset with just the `PianoMidi` dataset. It will take a few moments to run, and you can observe the output in adjacent folder called `./acme`. # ! python ../make_dataset.py --datasets PianoMidi # If you don't want to use any automatic downloaders, you must **specify your own input data**. You can provide midi files, or csv data (in an expected format - see the introduction [01_the_ACME_dataset.ipynb](01_the_ACME_dataset.ipynb) for the expected format). # # The below command will create a very small dataset with some of the data which will have been cached if the first command in this notebook was run. # ```bash # make_dataset.py \ # --datasets None \ # --local-midi-dirs ~/.mdtk_cache/PianoMidi/brahms # ``` # # Choosing and configuring degradations # See the next notebook, [03_degradation_functions.ipynb](03_degradation_functions.ipynb), for a full description of all the degradations available. 
# This call again works with the small brahms data and: # * leaves 44% of the data clean (no degradation is applied) # * selects only `pitch_shift` and `time_shift` degradations # * attempts to perform these degradations at a ratio of 4 to 1 (sampling is done) # * sets some parameters for the `pitch_shift` degradation # ```bash # python make_dataset.py \ # --datasets None \ # --local-midi-dirs ~/.mdtk_cache/PianoMidi/brahms \ # --clean-prop .44 \ # --degradations pitch_shift time_shift \ # --degradation-dist 4 1 \ # --degradation-kwargs '{"pitch_shift__min_pitch": 50, "pitch_shift__max_pitch": 80}' \ # --seed 42 # ``` # Specifying the `--degradation-kwargs` as a json string can get finickity with quotes, so you can specify the path to a json file instead e.g. # ```bash # python make_dataset.py \ # --datasets None \ # --local-midi-dirs ~/.mdtk_cache/PianoMidi/brahms \ # --clean-prop .44 \ # --degradations pitch_shift time_shift \ # --degradation-dist 4 1 \ # --degradation-kwargs deg_kwargs.json \ # --seed 42 # ``` # # where `deg_kwargs.json` is: # ``` # { # "pitch_shift__min_pitch": 50, # "pitch_shift__max_pitch": 80 # } # ``` # # Excerpt length # You can define the minimum length for an excerpt in milliseconds and number of notes (both conditions are honoured). Note that the defaults are `5000` and `10` respectively. See `mdtk.df_utils.get_random_excerpt` for full details of how the excerpt selection is done. # # This example produces excerpts of approximately 10 seconds in length, with a minimum of 20 notes in them. # ```bash # python make_dataset.py \ # --datasets None \ # --local-midi-dirs ~/.mdtk_cache/PianoMidi/brahms \ # --excerpt-length 10000 \ # --min-notes 20 # ``` # # Reformatted data - piano roll and command # By, default, we create compressed data which is reformatted for easy loading to models. This can be turned off by setting `--formats None`. 
# ```bash # python make_dataset.py \ # --datasets None \ # --local-midi-dirs ~/.mdtk_cache/PianoMidi/brahms \ # --formats None # ``` # We discuss the format data in a subsequent notebook: [04_data_parsers_and_degrader.ipynb](./04_data_parsers_and_degrader.ipynb). # # Cleaning up and specifying output directory # To remove any cached files, you can run `python make_dataset.py --clean`. A prompt is given by default, this can be cancelled with `--no-prompt`. Also, note that the output directory is deleted and recreated with every run of the script. Again, the user is prompted prior to deletion, but this can be skipped with `--no-prompt`. Alternatively, a new path for the output to be written to can be provided with `--output-dir`. Examples: # ```bash # python make_dataset.py --clean # prompts user before deleting cache # python make_dataset.py --clean --no-prompt # deletes cache with no prompt # python make_dataset.py --datasets PianoMidi # create a fresh dataset # python make_dataset.py # this raises a prompt to delete the old one located at ./acme # python make_dataset.py --output-dir ./new/output/dir # ``` # # Reproducibility # To ensure that you get the same result when you run the script again, set the `--seed` parameter. This must be a number between `0` and `2**32 - 1`. # ```bash # make_dataset.py --seed 42 # ``` # # Help # ! ../make_dataset.py -h
docs/02_dataset_creation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Required Install # !pip install librosa #Import Dependencies from pylab import * import os import pandas as pd import librosa import librosa.display import glob import random # # Sample one audio File # + data, sampling_rate = librosa.load('../data/emotion_speech/03-01-08-01-02-02-02.wav') plt.figure(figsize=(12, 4)) print(data, sampling_rate) librosa.display.waveplot(data, sr=sampling_rate) # - # # RAVDASS AUDIO SET # # # Generate list of file and apply emotion str # + ##Get list of Files in folder file_name = [] for root, dirs, files in os.walk("../data/emotion_speech/."): for filename in files: file_name.append(filename) file_name_df = pd.DataFrame(file_name, columns = ['file_name']) file_name_df['encoding']= file_name_df['file_name'] file_name_df['ID'] = file_name_df.index file_name_df['dir']= 'emotion_speech' file_name_df # + #Create DF seperating Values # Filename identifiers # Modality (01 = full-AV, 02 = video-only, 03 = audio-only). # Vocal channel (01 = speech, 02 = song). # Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised). # Emotional intensity (01 = normal, 02 = strong). NOTE: There is no strong intensity for the 'neutral' emotion. # Statement (01 = "Kids are talking by the door", 02 = "Dogs are sitting by the door"). # Repetition (01 = 1st repetition, 02 = 2nd repetition). # Actor (01 to 24. Odd numbered actors are male, even numbered actors are female). encoding_df = file_name_df["encoding"].str.split("-", n=-1, expand=True) encoding_df.columns = ['Modality','Vocal_channel','Class','Intensity','Statement','Repetition','Actor'] """ 1. Added a new column - Gender 2. Populated Gender column after stripping .wav from 'Actor' 3. 
Convert data type from obj to string then to int which is needed to determine odd or even """ encoding_df['Actor'] = encoding_df['Actor'].map(lambda actor: actor.rstrip('.wav')) encoding_df['Gender'] = 'Gender' encoding_df['Actor'].astype(str).astype(int) encoding_df """ 1. Loop through each row in dataframe 'Actor' column 2. Set value of Gender to Female if Actor value is even or Male if odd """ for index, row in encoding_df.iterrows(): if int(row[6]) % 2 == 0: row['Gender'] = '1' else: row['Gender'] = '0' encoding_df final_df = encoding_df.drop(columns=['Modality','Vocal_channel','Intensity','Statement','Repetition','Actor']) final_df.loc[final_df.Class == '01', 'Class'] = '02' final_df.loc[final_df.Class == '02', 'Emotion'] = 'calm' final_df.loc[final_df.Class == '03', 'Emotion'] = 'happy' final_df.loc[final_df.Class == '04', 'Emotion'] = 'sad' final_df.loc[final_df.Class == '05', 'Emotion'] = 'angry' final_df.loc[final_df.Class == '06', 'Emotion'] = 'fearful' final_df.loc[final_df.Class == '07', 'Emotion'] = 'disgust' final_df.loc[final_df.Class == '08', 'Emotion'] = 'surprise' final_df # - #merge data frame pre_merged_df = file_name_df.join(final_df, how='outer') merged_df = pre_merged_df.drop(columns=['encoding']) merged_df.set_index('ID') merged_df.Class.value_counts() # # TORONTO EMOTIONAL SPEECH DATASET # + ##Get list of Files in folder t_file_name = [] for root, dirs, files in os.walk("../data/toronto_speech/."): for filename in files: t_file_name.append(filename) t_file_name_df = pd.DataFrame(t_file_name, columns = ['file_name']) t_file_name_df['encoding']= t_file_name_df['file_name'] t_file_name_df['ID'] = t_file_name_df.index t_file_name_df['dir'] = 'toronto_speech' t_file_name_df.head(20) # + t_encoding_df = t_file_name_df["encoding"].str.split("_", n=-1, expand=True) t_encoding_df.columns = ['Actor','Word','Emotion'] t_encoding_df['Gender'] = '1' #t_encoding_df t_final_df = t_encoding_df.drop(columns=['Actor','Word']) t_final_df['Emotion'] = 
t_final_df['Emotion'].map(lambda emotion: emotion.rstrip('.wav')) #t_final_df = t_final_df['Emotion'].str.strip('.wav') t_final_df toronto_df = pd.DataFrame(t_final_df) toronto_df # + #Fix odd values in file and convert Nuetral class to calm toronto_df.loc[toronto_df.Emotion == 'ps', 'Emotion'] = 'surprise' toronto_df.loc[toronto_df.Emotion == 'alm', 'Emotion'] = 'calm' toronto_df.loc[toronto_df.Emotion == 'ngry', 'Emotion'] = 'angry' toronto_df.loc[toronto_df.Emotion == 'neutral', 'Emotion'] = 'calm' toronto_df.loc[toronto_df.Emotion == 'fear', 'Emotion'] = 'fearful' #assign emotion value numbers toronto_df.loc[toronto_df.Emotion == 'calm', 'Class'] = '02' toronto_df.loc[toronto_df.Emotion == 'happy', 'Class'] = '03' toronto_df.loc[toronto_df.Emotion == 'sad', 'Class'] = '04' toronto_df.loc[toronto_df.Emotion == 'angry', 'Class'] = '05' toronto_df.loc[toronto_df.Emotion == 'fearful', 'Class'] = '06' toronto_df.loc[toronto_df.Emotion == 'disgust', 'Class'] = '07' toronto_df.loc[toronto_df.Emotion == 'surprise', 'Class'] = '08' toronto_df.head(15) # - #merge data frame and export CSV t_pre_merged_df = t_file_name_df.join(toronto_df, how='outer') t_merged_df = t_pre_merged_df.drop(columns=['encoding']) t_merged_df = t_merged_df[['file_name', 'dir', 'Class','Gender','Emotion','ID']] t_merged_df t_merged_df.set_index('ID') t_merged_df.Class.value_counts() # # CONCAT DATASETS & RESET INDEX dataset_df = pd.concat([merged_df, t_merged_df]) dataset2 = dataset_df.reset_index(drop=True) dataset2 = dataset2.drop(columns=['Class']) dataset2['ID']= dataset2.index final_dataset_df = dataset2 final_dataset_df final_dataset_df.Emotion.value_counts() final_dataset_df.Gender.value_counts() final_dataset_df.to_csv(r'../data/gender_feature_data.csv', index = True) train = pd.read_csv('../data/gender_feature_data.csv') data_dir = ('../data') train.head() # # CREATE FEATURES train.Gender.value_counts() # + def parser(row): # function to load files and extract features file_name = 
os.path.join(os.path.abspath(data_dir), str(row.dir), str(row.file_name)) # handle exception to check if there isn't a file which is corrupted try: # extraction X, sample_rate = librosa.load(file_name, res_type='kaiser_fast') # extract mfcc data mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T,axis=0) except Exception as e: print("Error encountered while parsing file: ", file_name) return None, None feature = mfccs label = row.Gender emotion = row.Emotion return [feature, label, emotion] temp = train.apply(parser, axis=1) # + features = [x[0] for x in temp] features labels = [x[1] for x in temp] labels emotions = [x[2] for x in temp] emotions # - features_df = pd.DataFrame(data=features) features_df # + features_df["label"] = labels features_df["emotions"] = emotions features_df # - #Write to CSV features_df.to_csv('../data/gender_data.csv', index=False)
data_prep/data_prep_gender.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <div align="right"><NAME></div>
#
# # WWW: Who Will Win?
#
# This is a notebook for reasoning about who will win playoff series in basketball (and could be used for other sports). The table of contents for this notebook:
#
# * [Model Discussion](#Models:-Introduction-and-Discussion) What model should I use to make predictions?
# * [2019 NBA Playoffs](#2019-NBA-Playoffs) **Will be updated as the playoffs progress.**
# * [2018 NBA Playoffs](#2018-NBA-Playoffs) An exciting season.
# * [2016 NBA Playoffs](#2016-NBA-Playoffs) Ancient history; sad saga.
#
# ---
#
# # Models: Introduction and Discussion
#
# "It's tough to make predictions, especially [about](https://en.wikiquote.org/wiki/Yogi_Berra) [the](https://en.wikiquote.org/wiki/Niels_Bohr) future." That's true for the NBA basketball playoffs, where there is a wide range of opinions. Here are some models you might choose to help you make predictions:
#
# 1. **Holistic**: I just feel that my favorite team has about a 1 in 5 chance of winning it all.
# 2. **Game by Game**: I think my team has a 75% chance of winning each game in the first round, then 65% for each game in the second round, but only 45% in the Conference finals, then 55% if they make it to the NBA finals. From that I'll calculate their overall chance.
# 3. **Point by Point**: My team has a per-game average point differential of +5.79; I'll compare that to the other teams and calculate overall chances.
# 4. **Play by Play**: Use [detailed statistics](https://www.basketball-reference.com/play-index/plus/shot_finder.cgi) and overhead video to [model](https://danvatterott.com/blog/2016/06/16/creating-videos-of-nba-action-with-sportsvu-data/) the game shot-by-shot, or even pass-by-pass. Not covered here.
# # # Point by Point Model # # The **[Simple Rating System](https://www.sportingcharts.com/dictionary/nba/simple-rating-system-statistics.aspx) (SRS)** records the average point differential of a team over the season, with a slight adjustment for strength of schedule (see [basketball-reference.com](https://www.basketball-reference.com/leagues/NBA_2018.html)) . A great team has an SRS around 10; anything over 5 is very good. # # The Point-by-Point model says: a game is decided by a random sample from the distribution of point differentials, which is a normal (Gaussian) distribution centered around the difference of SRS scores of the two teams. So, if a team with an SRS of 7 plays an opponent with an SRS of 4, we can expect the team to win by 3, on average, but it will still lose some games. We need to know the standard deviation of the distribution to determine how often that happens; [Betlabs](https://www.betlabssports.com/blog/a-look-at-nba-team-totals/) says the standard deviation # is 10.5 points across the NBA. # The function `win_game` does the calculation of win probability given an SRS point differential, using Monte Carlo simulation: # + # %matplotlib inline import matplotlib.pyplot as plt from statistics import mean from random import gauss from collections import Counter def win_game(srs_diff, 𝝈=10.5, n=100000): "Given SRS point differential of a team against another, return game win probability." return mean(gauss(srs_diff, 𝝈) > 0 for game in range(n)) # - win_game(3) # So, if your team has a 3-point edge over an opponent, this model predicts your team will win 61% of the time. # # # # Game by Game Model # # The next model says that a playoff series is a sequence of independent and identically distributed game results (where the probability of a single-game win could be specified using SRS, or holistically, or some other model). 
# The idea here is to be consistent: if you believe that a team's win percentage is 60%, and you believe that games are independent, then you must believe that the team's chance of winning 4 in a row is 0.6<sup>4</sup> = 0.1296. This model ignores the fact that games aren't strictly independent, ignores the possibility of injuries, and ignores home court advantage. Why? Because these factors would change the final winning estimate by only a few percentage points, and I already have more uncertainty than that.
#
# The function `win_series` calculates the probability of winning a series, given the probability of winning a game:

def win_series(p: float, W: int = 0, L: int = 0) -> float:
    """Probability of winning best-of-7 series, given a probability p of winning a game.
    The optional arguments say how many Wins and Losses the team has in the series so far."""
    # Recursive expectation over the next game: first team to 4 wins takes the
    # series; otherwise condition on winning (prob p) or losing (prob 1 - p)
    # the next game and recurse on the updated W/L record.
    return (1 if W == 4 else
            0 if L == 4 else
            p * win_series(p, W + 1, L) + (1 - p) * win_series(p, W, L + 1))

# We can make a table:

for srs_diff in range(10):
    g = win_game(srs_diff)
    print('{} point differential = {:4.0%} win game = {:4.0%} win series'.format(
          srs_diff, g, win_series(g)))

# With a zero point differential obviously you're at 50% win percentage; with a 3 point differential you're at 61% to win a game, and 73% to win the series. This agrees very well with the "Differential vs. Win Percentage" [chart](http://a.espncdn.com/combiner/i?img=%2Fphoto%2F2018%2F0408%2F180408_differential.png&w=1140&cquality=40) on [this page](http://www.espn.com/nba/story/_/id/23071005/kevin-pelton-weekly-mailbag-including-nba-all-offensive-teams).
# # # # We can also do plots: diff = [d/10 for d in range(101)] game = [win_game(d) for d in diff] series = [win_series(p) for p in game] plt.plot(diff, [100*s for s in series], label='Series Win') plt.plot(diff, [100*g for g in game], label='Game Win') plt.legend(loc='best'); plt.grid() plt.xlabel('Point Differential'); plt.ylabel('Win Percentage'); P = range(101) plt.plot(P, [100*win_series(p/100) for p in P]) plt.grid() plt.xlabel('Game Win Percentage'); plt.ylabel('Series Win Percentage'); # # Series by Series Model # # The probability of winning a championship can be modeled by assuming that each series is independent, and multiplying the probability of winning each series. The function `playoffs` does this; it takes as arguments a team name and then some playoff round entries, each consisting of the opponent team name, the game win percentage against this opponent, and optionally the wins and losses in the series so far. # # The function `playoffs` does two calculations, one based on my subjective probability of the team winning a single game (as specified in the entry for each round), and one based on the SRS differential. In each line of the output and for each of these two calculations, we show the probability of winning a single game, the probability of winning the series (given that the two teams are still playing), and the probability of winning all the series up to this point. # + def playoffs(team, *rounds): "Print probability for team winning each series (subjective and SRS)." 
all_p = all_srs = 1.0 # Probabilities of winning it all print('{} {:8} | Subjective Probabilities | SRS Differential' .format('Team'.ljust(len(team)), 'Opponent')) for (opponent, p, *WL) in rounds: p_srs = win_game(SRS[team] - SRS[opponent]) all_p, str_p = update(all_p, p, *WL) all_srs, str_srs = update(all_srs, p_srs, *WL) print('{} vs {:8} | {} |{}'.format(team, opponent, str_p, str_srs)) def update(all_p, p, W=0, L=0): "Update the probability of winning it all, `all_p`, and also return a string." series_p = win_series(p, W, L) all_p *= series_p return all_p, '{} Game;{} Series;{} All'.format(pct(p), pct(series_p), pct(all_p)) pct = '{:4.0%}'.format # - # --- # # 2019 NBA Playoffs # # ## 12 April, 2019 # # Here are the SRS scores and my subjective ratings for what I consider the two top teams in 2019, the Warriors and Bucks, and their likely playoff opponents: # + SRS = dict(Bucks=8.04, Warriors=6.42, Raptors=5.49, Rockets=4.96, Nuggets=4.19, Celtics=3.90, Sixers=2.25, Clippers=1.09, Pistons=-0.56) playoffs('Warriors', ('Clippers', 0.80), ('Rockets', 0.65), ('Nuggets', 0.70), ('Bucks', 0.52)) # - playoffs('Bucks', ('Pistons', 0.80), ('Celtics', 0.67), ('Raptors', 0.60), ('Warriors', 0.48)) # Let's compare championship predictions for four methods: my subjective evaluations, the SRS point differentials, and two methods from [538](https://projects.fivethirtyeight.com/2019-nba-predictions/): ELO, which is similar to SRS, and their more complex CARM-ELO model: # # # |Method|Warriors|Bucks| # |------|--------|-----| # |Subjective| 37% | 26% | # |SRS | 13% | 35% | # | ELO | 16% | 23% | # | CARM-ELO| 61% | 15% | # # # # Which prediction method is best? 
I have low confidence in the SRS ratings, because the Warriors seemed like they were coasting for parts of the regular season and are capable of "flipping the switch" in the playoffs, and because the Bucks have significant injuries to Brogdon, Mirotic and Gasol, all of whom contributed to the Bucks' great record in the season but will miss parts of the playoffs. # # # 1 May, 2019 # # The first round of playoffs was pretty uneventful&mdash;the favored team won in each of the eight matchups. Here's where we are today: playoffs('Warriors', ('Clippers', 0.80, 4, 2), ('Rockets', 0.65, 2, 0), ('Nuggets', 0.70), ('Bucks', 0.52)) playoffs('Bucks', ('Pistons', 0.80, 4, 0), ('Celtics', 0.67, 1, 1), ('Raptors', 0.60), ('Warriors', 0.48)) # # 8 May, 2019 # # The favored teams keep winning: three of them are ahead 3-2, and the fourth, the Bucks, won their series 4-1. But the Warriors suffered the loss of a second starter, <NAME>, to injury, and it is unclear how long he'll be out, # so I'm uncertain how to adjust the subjective probabilities: playoffs('Warriors', ('Clippers', 0.80, 4, 2), ('Rockets', 0.50, 3, 2), ('Nuggets', 0.60), ('Bucks', 0.50)) playoffs('Bucks', ('Pistons', 0.80, 4, 0), ('Celtics', 0.67, 4, 1), ('Raptors', 0.60), ('Warriors', 0.50)) # --- # # # 2018 NBA Playoffs # # ## 12 April, 2018 # # The Las Vegas oddsmakers have the Warriors and Rockers as co-favorites at 35% chance to win the title, while [538](https://fivethirtyeight.com/features/the-nba-playoffs-sleepers-favorites-and-best-first-round-matchups/), using their CARM-ELO model, favor the Rockets at 44% and give the Warriors only a 4% chance. Those big differences underscore that rational people can use different models with different assumptions and come to different conclusions. 
SRS = dict(Raptors=7.29, Celtics=3.23, Sixers=4.30, Cavs=0.59, Rockets=8.21, Warriors=5.79, Blazers=2.60, Thunder=3.42, Jazz=4.47, Spurs=2.89, Wolves=2.35) playoffs('Rockets', ('Wolves', 0.75), ('Jazz', 0.70), ('Warriors', 0.55), ('Raptors', 0.60)) # So I'm in good agreement with the Vegas oddsmakers about the Rockets: my subjective probabilities have the Rockets at 49% winning the conference and 35% winning the title, while Vegas had them at 44% and 35%. The SRS differential has them at 48% and 27%. playoffs('Warriors', ('Spurs', 0.75), ('Blazers', 0.65), ('Rockets', 0.45), ('Raptors', 0.55)) # For the Warriors my subjective probabilities are splitting the difference between 538's low estimate (8% win conference, 4% win title) and Vegas's high estimate (44% and 35%, tied with the Rockets); I have them at 29% and 18%. The SRS differential at 17% and 6% is closer to 538. # # How do I reconcile the discrepancy between my subjective probabilities and the SRS numbers? I guess I would say that I have less faith in the point differential model, for several reasons: it counts games from the distant past, when some teams had very different lineups than they have now (due to injuries and trades); different teams have different approaches to how they handle games whose outcome is already decided; the metric puts too much emphasis on blowouts, for example, in the Warriors' final game, it was to their strategic advantage to lose, and they did it very convincingly&mdash;by 40 points, which dropped their average point differential for the entire year by 0.5 points. # --- # # # 2016 NBA Playoffs # # # ## 18 April 2016 # # The Golden State Warriors have had a historic basketball season, winning more games than any other team ever has. But will they top that off by winning the championship? There are 15 other teams in contention, including one, the Spurs, that has had a historic season as the best second-best team ever. 
The web site fivethirtyeight, using a complicated scoring system, [gives](http://projects.fivethirtyeight.com/2016-nba-picks/) the Warriors a 44% chance of winning, with the Spurs at 28%. Basketball-reference [has](http://www.basketball-reference.com/friv/playoff_prob.cgi) the Warriors at 41% and Spurs at 32.5%, while a [betting site](http://www.oddsshark.com/nba/nba-futures) had the Warriors at 54% and Spurs at 18%. Here is my analysis: # # + SRS = dict(Warriors=10.38, Spurs=10.28, Thunder=7.09, Cavs=5.45, Raptors=4.08, Rockets=0.34, Clippers=4.13, Pistons=0.43, Hawks=3.49, Memphis=-2.14, Blazers=4.43, Dallas=-0.87) playoffs('Warriors', ('Rockets', 0.83), ('Clippers', 0.73), ('Spurs', 0.58), ('Cavs', 0.67)) # - playoffs('Spurs', ('Memphis', 0.83), ('Thunder', 0.62), ('Warriors', 0.42), ('Cavs', 0.67)) playoffs('Cavs', ('Pistons', 0.83), ('Hawks', 0.60), ('Raptors', 0.55), ('Warriors', 0.33)) # I have the Warriors at 50% (for the estimate of winning it all) and the Spurs at 20%, so I'm more of a Warriors fan than fivethirtyeight and basketball-reference. Interestingly, while fivethirtyeight does not think this year's Warriors are better than the 1995 Bulls, they [do think](http://fivethirtyeight.com/features/the-warriors-still-arent-the-best-team-ever/) the Spurs, Thunder, and Cavs are the best ever second-, third-, and fourth-best teams in a season. # # WWWWC: Will Warriors Win Without Curry? # # ## 27 April 2016 # # The Playoff picture has changed! # # We have some results for first-round series, and there have been key injuries to players including <NAME>, <NAME>, <NAME>, and <NAME>. 
We don't know for sure how long Curry will be out, but here are my updated odds for the Warriors, under the assumption that Curry misses the second round, and comes back in time for the Western Conference Finals at a mildly reduced capacity: playoffs('Warriors', ('Rockets', 0.70, 3, 1), ('Blazers', 0.55), ('Spurs', 0.55), ('Cavs', 0.60)) # The Spurs and Cavs are rolling; let's update their odds: playoffs('Spurs', ('Memphis', 0.83, 4, 0), ('Thunder', 0.62), ('Warriors', 0.45), ('Cavs', 0.67)) playoffs('Cavs', ('Pistons', 0.83, 4, 0), ('Hawks', 0.60), ('Raptors', 0.55), ('Warriors', 0.40)) # So my updated odds are that the Warriors and Spurs are roughly equally likely to win (26% and 24%); the Cavs are still less likely (13%), and there is more uncertainty. # # # # WWWWCB: Will Warriors Win With Curry Back? # # ## 10 May 2016 # # Curry has returned from his injury, and after a slow shooting start, had the highest-scoring overtime period in the history of the NBA. Meanwhile, the Thunder lead the Spurs, 3-2, and the Cavaliers have been dominant in the East, hitting a historic number of 3-point shots. Here is my revised outlook: playoffs('Warriors', ('Rockets', 0.70, 4, 1), ('Blazers', 0.67, 3, 1), ('Spurs', 0.60), ('Cavs', 0.55)) playoffs('Spurs', ('Memphis', 0.83, 4, 0), ('Thunder', 0.60, 2, 3), ('Warriors', 0.40), ('Cavs', 0.50)) playoffs('Thunder', ('Dallas', 0.83, 4, 1), ('Spurs', 0.40, 3, 2), ('Warriors', 0.40), ('Cavs', 0.45)) playoffs('Cavs', ('Pistons', 0.83, 4, 0), ('Hawks', 0.60, 4, 0), ('Raptors', 0.65), ('Warriors', 0.45)) # So overall, from the start of the playoffs up to May 10th, I have: # # - **Warriors:** Dropped from 50% to 26% with Curry's injury, and rebounded to 42%. # - **Spurs:** Dropped from 20% to 5% after falling behind Thunder. # - **Thunder:** Increased to 7%. # - **Cavs:** Increased to 31%. # # # Time to Panic Yet? # # ## 17 May 2016 # # The Thunder finished off the Spurs and beat the Warriors in game 1. 
Are the Thunder, like the Cavs, peaking at just the right time, after an inconsistant regular season? Is it time for Warriors fans to panic? # # Sure, the Warriors were down a game twice in last year's playoffs and came back to win both times. Sure, the Warriors are still 3-1 against the Thunder this year, and only lost two games all season to elite teams (Spurs, Thunder, Cavs, Clippers, Raptors). But the Thunder are playing at a top level. Here's my update, showing that the loss cost the Warriors 5%: playoffs('Warriors', ('Rockets', 0.70, 4, 1), ('Blazers', 0.67, 4, 1), ('Thunder', 0.63, 0, 1), ('Cavs', 0.55)) # # Yet! # # ## 24 May 2016 # # The Thunder won two in a row (first time the Warriors had lost two in a row all year), putting the Warriors down 3-1. And the Cavs are looking mortal, losing two to the Raptors. So now it looks to me like the Thunder are favorites to win it all: playoffs('Warriors', ('Rockets', 0.70, 4, 1), ('Blazers', 0.67, 4, 1), ('Thunder', 0.55, 1, 3), ('Cavs', 0.55)) playoffs('Cavs', ('Pistons', 0.83, 4, 0), ('Hawks', 0.60, 4, 0), ('Raptors', 0.55, 2, 2), ('Thunder', 0.45)) playoffs('Thunder', ('Dallas', 0.83, 4, 1), ('Spurs', 0.40, 4, 2), ('Warriors', 0.45, 3, 1), ('Cavs', 0.55)) # ## 1 June 2016: The Finals # # The Warriors completed their comeback against the Thunder, putting them in a great position to win this year (and they are already established as [favorites for next year](http://www.foxsports.com/nba/story/golden-state-warriors-title-favorites-cleveland-cavaliers-odds-2016-17-053016)). I think the Warriors have about a 55% game win probability (and thus about 61% series probability) against the Cavs. Rather than update this game by game, I'll give a table of series win probability based on game win probability and the number of games won so far. 
For example, if you agree the Warriors have a 55% game win percentage, and if they win the first game, then check the "1-0" row and the "55%" column to see that their series win probability has gone up to 74%. # + pcts = [p/100 for p in range(15, 90, 5)] print('W-L | Game Win Percentage') print(' | ' + ' '.join(map(pct, pcts))) for W in range(4): print('----+' + '-' * 5 * len(pcts)) for L in reversed(range(4)): results = [win_series(p, W, L) for p in pcts] print('{}-{} | {}'.format(W, L, ' '.join(map(pct, results)))) # - # # 20 June 2016 # # Congratulations to LeBron, Kyrie, Coach Lue, and all the Cavs for overcoming long odds to win a championship for Cleveland. My model says the Warriors were at 91% win probability when they were up 3-1 (97% if you go by point differential): win_series(0.55, W=3, L=1) win_series(win_game(SRS['Warriors'] - SRS['Cavs']), W=3, L=1) # # Additional Calculations: Series Length # # Given a team's game win percentage, how many games should we expect a series to run? For example, for a team with a game win percentage of 55%, how likely is it to sweep all 4 games? To go to 7 games? 
# Here's a chart of the probability of each possible series outcome, based on the win percentage of the first team:

# +
def series_results(p: float, weight: float = 1, W: int = 0, L: int = 0) -> Counter:
    """Return {(win, loss): probability} for all possible outcomes of the series.

    `p` is the first team's single-game win probability; `weight` is the
    probability of having reached the current (W, L) record and is threaded
    through the recursion so each leaf carries its path probability.
    """
    if W == 4 or L == 4:
        # Series over: this single outcome occurred with probability `weight`.
        return Counter({(W, L): weight})
    else:
        # Counter addition merges the two sub-branches, summing probabilities
        # for identical (W, L) outcomes. (All weights here are positive, so
        # Counter.__add__ discarding non-positive counts is not a concern.)
        return (series_results(p, weight * p, W + 1, L) +
                series_results(p, weight * (1 - p), W, L+1))

def series_results_table(pcts=pcts):
    # Print, for each possible final series record, its probability at each
    # game-win percentage in `pcts` (module-level default defined earlier).
    outcomes = [(4, 0), (4, 1), (4, 2), (4, 3),
                (3, 4), (2, 4), (1, 4), (0, 4)]
    print('W-L | Game Win Percentage')
    print('    | ' + ' '.join(map(pct, pcts)))
    print('----+' + '-' * 5 * len(pcts))
    for (W, L) in outcomes:
        results = [series_results(p)[W, L] for p in pcts]
        print('{}-{} | {}'.format(W, L, ' '.join(map(pct, results))))

series_results_table()
# -

# Our hypothetical 55% team has a 9% chance of sweeping, and a 17+14 = 31% chance of going to 7 games.
ipynb/WWW.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import os import sys sys.path.insert(1, os.path.join(sys.path[0], '..')) from distance_matching_functional import DistanceMatching from distance_matching_lowrank import DistanceMatching as DMR_LR import functions import utils sys.path.append("../baselines/") from baselines import baselines import matplotlib.pyplot as plt # %matplotlib inline remake_delta_Z = False refit_mixture = False refit_vc = False # - # Load Data X_train = np.load("X_train.npy") Y_train = np.load("Y_train.npy") Z_train = np.load("Z_train.npy") X_test = np.load("X_test.npy") Y_test = np.load("Y_test.npy") Z_test = np.load("Z_test.npy") delta_Z = np.load("delta_Z.npy") K = Z_train.shape[1] print(K) # + #from sklearn.decomposition import PCA #pca = PCA(n_components=5) #X_train = pca.fit_transform(X_train) #X_test = pca.transform(X_test) X_train = X_train[:, :5] X_test = X_test[:, :5] X_train = np.hstack((X_train, np.expand_dims(np.ones_like(X_train[:, 0]), 1))) X_test = np.hstack((X_test, np.expand_dims(np.ones_like(X_test[:, 0]), 1))) # + desired_Z_features = 2 if Z_train.shape[1] > desired_Z_features: print("Reducing Z size...", end=' ') from sklearn.decomposition import PCA pca = PCA(n_components=desired_Z_features) Z_train = pca.fit_transform(Z_train) Z_test = pca.transform(Z_test) print("Finished.") K = Z_train.shape[1] dZ = [ lambda x,y: functions.safe_wrapper(x, y, functions.abs_diff), lambda x,y: functions.safe_wrapper(x, y, functions.abs_diff) ] from sklearn.preprocessing import normalize Z_train, norms = normalize(Z_train, norm='l1', axis=0, return_norm=True) Z_test /= norms print(Z_train.shape) print(Z_test.shape) # + # Linear Regression baseline sys.path.append("../") from sklearn.linear_model import LinearRegression as LR lr = LR() lr_coef = [] 
for i in range(Y_train.shape[1]): lr.fit(X_train, Y_train[:, i]) print(lr.score(X_test, Y_test[:, i])) lr_coef.append(lr.coef_) lr_coef = np.array(lr_coef) # - from sklearn.linear_model import ElasticNet en = ElasticNet(alpha=1e-3, l1_ratio=0.1, fit_intercept=False, normalize=True) en.fit(X_train, Y_train) print(en.score(X_test, Y_test)) # + remake_delta_U_test_train = False N_train = len(X_train) if remake_delta_U_test_train: N_test = len(X_test) N_train = len(X_train) delta_U_test_train = np.zeros((N_test, N_train, K)) print("Calculating Delta_U Test/Train") for i in range(N_test): print("{} / {}".format(i, N_test), end='\r') for j in range(N_train): delta_U_test_train[i, j, :] = np.array([dZ[k](Z_train[j, k], Z_test[i, k]) for k in range(K)]) np.save("delta_U_test_train.npy", delta_U_test_train) else: delta_U_test_train = np.load("delta_U_test_train.npy") def calc_test_err(beta, X, Y, delta_U_test_train, n_neighbors, phi_u=None, mse=True, mae=False): if phi_u is None: phi_u = np.ones_like(U_train[0], dtype='float64') K = len(delta_U_test_train[0, 0]) N_train = delta_U_test_train.shape[1] err1 = 0. err2 = 0. 
predictions = np.zeros_like(Y) for i in range(len(X)): closest = np.argsort( np.array([delta_U_test_train[i, j, :].dot(phi_u) for j in range(N_train)])) knn = np.mean(beta[closest[:n_neighbors]], axis=0) #predictions[i, 0] = X[i, :].T.dot(knn[0]) #predictions[i, 1] = X[i, :].T.dot(knn[1]) if mse: err1 += functions.logistic_loss(X[i], Y[i, 0], knn[0]) err2 += functions.logistic_loss(X[i], Y[i, 1], knn[1]) #err1 += (Y[i, 0] - predictions[i, 0])**2 #err2 += (Y[i, 1] - predictions[i, 1])**2 elif mae: err1 += functions.logistic_loss(X[i], Y[i, 0], knn[0]) err2 += functions.logistic_loss(X[i], Y[i, 1], knn[1]) #err1 += np.abs(Y[i, 0] - predictions[i, 0]) #err2 += np.abs(Y[i, 1] - predictions[i, 1]) return (err1+err2) / (2*len(X)), predictions # - refit_mixture=True if refit_mixture: mixture_beta = [] n_classes = 15 for i in range(Y_train.shape[1]-1): print("Fitting for task #{}".format(i)) task_beta, task_assignments = baselines.mixture_model_logistic( X_train, np.expand_dims(Y_train[:, i], 1), n_classes=n_classes, lam=1e-3, n_restarts=5, verbosity=100, init_lr=2e-1, eps=1e-1) mixture_beta.append(task_beta) plt.figure() plt.hist(task_assignments, bins=n_classes) mixture_beta.append(np.zeros_like(mixture_beta[0])) mixture_beta = np.swapaxes(np.array(mixture_beta), 0, 1) np.save("beta_mixture.npy", mixture_beta) else: mixture_beta = np.load("beta_mixture.npy") print(mixture_beta.shape) print(X_train.shape) train_preds_mix = np.array([np.tensordot(X_train[i], mixture_beta[i].T, axes=1) for i in range(len(X_train))]) print(train_preds_mix.shape) print(train_preds_mix) print(Y_train) mapped_train_preds = np.exp(train_preds_mix) / (1 + np.exp(train_preds_mix)) from sklearn.metrics import mean_squared_error a = mean_squared_error(Y_train[:, 0], mapped_train_preds[:, 0]) b = mean_squared_error(Y_train[:, 1], mapped_train_preds[:, 1]) print((a+b)/2) from sklearn.metrics import r2_score a = r2_score(Y_train[:, 0], mapped_train_preds[:, 0]) b = r2_score(Y_train[:, 1], 
mapped_train_preds[:, 1]) print((a+b)/2) (logistic_loss, mixture_preds) = calc_test_err(mixture_beta, X_test, Y_test, delta_U_test_train, 1, np.ones((delta_U_test_train.shape[2]))) mapped_test_preds = np.exp(mixture_preds) / (1 + np.exp(mixture_preds)) # + a = mean_squared_error(Y_test[:, 0], mapped_test_preds[:, 0]) b = mean_squared_error(Y_test[:, 1], mapped_test_preds[:, 1]) print((a+b)/2) a = r2_score(Y_test[:, 0], mapped_test_preds[:, 0]) b = r2_score(Y_test[:, 1], mapped_test_preds[:, 1]) print((a+b)/2) # - refit_vc = True if refit_vc: vc_beta = [] vc_z = [] for i in range(Y_train.shape[1]-1): print("Fitting for task #{}".format(i)) init_Z = np.zeros((Z_train.shape[1]+1, X_train.shape[1])) #init_Z[0, -1] = np.mean(Y_train[:, i]) # intercept task_beta, task_Z = baselines.vc_logistic( X_train, np.expand_dims(Y_train[:, i], 1), Z_train, lam=1e-5, lr=1e-4, verbosity=10, n_restarts=1, init_Z=init_Z, max_iters=10000) print(task_beta) print(task_Z) vc_z.append(task_Z) vc_beta.append(task_beta) vc_beta.append(np.zeros_like(vc_beta[0])) vc_beta = np.swapaxes(np.array(vc_beta), 0, 1) print(vc_beta.shape) np.save("beta_vc.npy", vc_beta) else: vc_beta = np.load("beta_vc.npy") Z_bordered = np.hstack((Z_train, np.ones((len(Z_train), 1)))) print(Z_train.shape) vc_beta = np.array([z.dot(vc_z) for z in Z_bordered]) vc_preds = np.array([X_train[i].dot(vc_beta[i].T) for i in range(len(X_train))]) plt.hist(vc_preds[:, 0]) plt.show() # + calc_mse = lambda a,b,c,d,e,f: calc_test_err(a,b,c,d,e,f,mse=True, mae=False) vc_preds_train = np.array([np.tensordot(X_train[i], vc_beta[i].T, axes=1) for i in range(len(X_train))]) mapped_vc_preds_train = np.exp(vc_preds_train / (1+vc_preds_train)) (vc_mse_test, vc_preds_test) = calc_mse(vc_beta, X_test, Y_test, delta_U_test_train, 1, np.ones((K))) mapped_vc_preds_test = np.exp(vc_preds_test / (1+vc_preds_test)) #print(calc_mse(vc_beta, X_train, Y_train, delta_U_train_train, 1, np.ones((K)))) # + a = mean_squared_error(Y_train[:, 0], 
vc_preds_train[:, 0]) b = mean_squared_error(Y_train[:, 1], vc_preds_train[:, 1]) print(a, b, (a+b)/2) a = mean_squared_error(Y_train[:, 0], mapped_vc_preds_train[:, 0]) b = mean_squared_error(Y_train[:, 1], mapped_vc_preds_train[:, 1]) print((a+b)/2) a = r2_score(Y_train[:, 0], mapped_vc_preds_train[:, 0]) b = r2_score(Y_train[:, 1], mapped_vc_preds_train[:, 1]) print((a+b)/2) # + a = mean_squared_error(Y_test[:, 0], mapped_vc_preds_test[:, 0]) b = mean_squared_error(Y_test[:, 1], mapped_vc_preds_test[:, 1]) print((a+b)/2) a = r2_score(Y_test[:, 0], mapped_vc_preds_test[:, 0]) b = r2_score(Y_test[:, 1], mapped_vc_preds_test[:, 1]) print((a+b)/2) # - # Baseline - overfit to intercept N = X_train.shape[0] P = X_train.shape[1] T = vc_beta.shape[1] overfit = np.zeros((N, T, P)) overfit[:, :, -1] = np.log(np.clip(Y_train, 0.01, 0.99)) print(overfit) # Baseline - Mean base = np.zeros((N, 3, P)) base[:, :, -1] = np.mean(Y_train, axis=0) # TODO: Should this be log? print(base) # + #### Deep Learning benchmark. import warnings warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") warnings.filterwarnings("ignore", message="the matrix subclass is") import tensorflow as tf # + batch_size = 25 # Define a model with many linear regression atoms. 
def xavier_init(fan_in, fan_out, constant=1): #Xavier initialization of network weights low = -constant*np.sqrt(6.0/(fan_in + fan_out)) high = constant*np.sqrt(6.0/(fan_in + fan_out + 1)) return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32) try: tf.reset_default_graph() except: pass lr = 1e-4 lr_decay = 1-1e-4 learning_rate = tf.Variable(lr) X_shape = X_train.shape[1] + Z_train.shape[1] Y_shape = [3] x = tf.placeholder(tf.float32, [None, X_shape]) y = tf.placeholder(tf.float32, [None, 3]) n_hidden = [250, 250, 250, 250] eta_1 = tf.verify_tensor_all_finite( tf.Variable(tf.random_uniform( (X_shape, n_hidden[0]), minval=-1, maxval=1, dtype=tf.float32)), 'eta_1 not finite') b_1 = tf.Variable(tf.zeros([n_hidden[0]], dtype=tf.float32)) eta_2 = tf.verify_tensor_all_finite( tf.Variable(tf.random_uniform( (n_hidden[0], n_hidden[1]), minval=-1, maxval=1, dtype=tf.float32)), 'eta_2 not finite') b_2 = tf.Variable(tf.zeros([n_hidden[1]], dtype=tf.float32)) eta_3 = tf.verify_tensor_all_finite( tf.Variable(tf.random_uniform( (n_hidden[1], n_hidden[2]), minval=-1, maxval=1, dtype=tf.float32)), 'eta_3 not finite') b_3 = tf.Variable(tf.zeros(n_hidden[2], dtype=tf.float32)) eta_4 = tf.verify_tensor_all_finite( tf.Variable(tf.random_uniform( (n_hidden[2], n_hidden[3]), minval=-1, maxval=1, dtype=tf.float32)), 'eta_4 not finite') b_4 = tf.Variable(tf.zeros(n_hidden[3], dtype=tf.float32)) eta_5 = tf.verify_tensor_all_finite( tf.Variable(tf.random_uniform( (n_hidden[3], Y_shape[0]), minval=-1, maxval=1, dtype=tf.float32)), 'eta_5 not finite') b_5 = tf.Variable(tf.zeros(Y_shape, dtype=tf.float32)) hidden_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, eta_1), b_1)) hidden_2 = tf.nn.sigmoid(tf.add(tf.matmul(hidden_1, eta_2), b_2)) hidden_3 = tf.nn.sigmoid(tf.add(tf.matmul(hidden_2, eta_3), b_3)) hidden_4 = tf.nn.sigmoid(tf.add(tf.matmul(hidden_3, eta_4), b_4)) y_hat = tf.add(tf.matmul(hidden_4, eta_5), b_5) loss = tf.losses.mean_squared_error(y[:, 0], y_hat[:, 0]) + 
tf.losses.mean_squared_error(y[:, 1], y_hat[:, 1]) loss += 1e-2*tf.add_n([tf.nn.l2_loss(v) for v in [eta_1, eta_2, eta_3, eta_4] ]) # + optimizer = tf.train.GradientDescentOptimizer( learning_rate=learning_rate).minimize(loss) try: init = tf.global_variables_initializer() sess = tf.InteractiveSession() sess.run(init) except UserWarning: sess.close() init = tf.global_variables_initializer() sess = tf.InteractiveSession() sess.run(init) # - n_epochs = 1000 XZ_train = np.hstack((X_train, Z_train)) print(X_train.shape) print(XZ_train.shape) learning_rate *= 2 print(sess.run((learning_rate))) for i in range(n_epochs): learning_rate *= lr_decay permutation = np.random.permutation(len(X_train)) total_cost = 0 for idx in range(0, len(X_train)-batch_size, batch_size): start = idx end = idx + batch_size opt, cost, y_hat_val, eta_1_val = sess.run((optimizer, loss, y_hat, eta_1), feed_dict={ x: XZ_train[permutation[start:end]], y: Y_train[permutation[start:end]] }) total_cost += cost #print(y_hat_val, eta_1_val)#, Y_train[permutation[start:end]]) print(i, total_cost) # + predictions_deep = sess.run((y_hat), feed_dict={ x: np.hstack((X_train, Z_train)) }) test_predictions_deep = sess.run((y_hat), feed_dict={ x: np.hstack((X_test, Z_test)) }) # + a = mean_squared_error(Y_train[:, 0], predictions_deep[:, 0]) b = mean_squared_error(Y_train[:, 1], predictions_deep[:, 1]) print(a, b, (a+b)/2) a = mean_squared_error(Y_test[:, 0], test_predictions_deep[:, 0]) b = mean_squared_error(Y_test[:, 1], test_predictions_deep[:, 1]) print((a+b)/2) a = r2_score(Y_test[:, 0], test_predictions_deep[:, 0]) b = r2_score(Y_test[:, 1], test_predictions_deep[:, 1]) print((a+b)/2) # + T = 3 P = 6 beta_target = np.zeros((N_train, T, P)) init_beta = np.zeros_like(beta_target)#vc_beta[:N, :P] lam = 1e-3 gamma = 1e3 alpha = 1e0 upsilon = 1e-2 inter_penalty = 0#2e0 l2_ratio = 1.0 rho_beta = lambda beta, i: lam*(functions.lasso_penalty(beta, beta_target[i]) + l2_ratio*functions.l2_penalty(beta, 
beta_target[i]) + inter_penalty*(beta[-1]**2)) rho_beta_prime = lambda beta, i:lam*(functions.lasso_derivative(beta, beta_target[i]) + l2_ratio*functions.l2_prime(beta, beta_target[i]) + 2*inter_penalty*np.vstack((np.zeros_like(beta[:-1]), beta[-1]))) init_phi_beta = np.hstack((utils.soft_normalize(np.ones((P*Y_train.shape[1]))))) psi_beta = lambda phi_beta: 0.5*alpha*np.linalg.norm(phi_beta - init_phi_beta, ord=2) psi_beta_prime = lambda phi_beta: alpha*(phi_beta - init_phi_beta) init_phi_u = utils.soft_normalize(np.ones((K))) psi_u = lambda phi_u: upsilon*np.linalg.norm(phi_u, ord=1) psi_u_prime = lambda phi_u: upsilon*np.sign(phi_u) init_beta_scale=1e2 psi_beta_scale = lambda beta_scale: 1e-3*(1./beta_scale) psi_beta_scale_prime = lambda beta_scale: -1e-3*(beta_scale**(-2)) dmr = DistanceMatching(init_beta=init_beta, f=lambda x, y, b: functions.logistic_loss_multitask(x, y, b.T), f_prime= lambda x, y, b: functions.logistic_loss_prime_multitask(x, y, b), gamma=gamma, n_neighbors=100, calc_dist_errors_every=1, calc_closest_every=10, rho_beta=rho_beta, rho_beta_prime = rho_beta_prime, init_phi_beta = init_phi_beta, psi_beta = psi_beta, psi_beta_prime = psi_beta_prime, init_phi_u=init_phi_u, psi_u=psi_u, psi_u_prime=psi_u_prime, init_beta_scale=init_beta_scale, psi_beta_scale=psi_beta_scale, psi_beta_scale_prime=psi_beta_scale_prime, intercept=False, n_threads=0) dZ = [ lambda x,y: functions.safe_wrapper(x, y, functions.abs_diff) ] for _ in range(K-1): dZ.append(lambda x,y: functions.safe_wrapper(x, y, functions.abs_diff)) # - print(init_beta.shape) if remake_delta_Z: delta_Z = dmr.make_covariate_distances( Z_train, dZ, len(dZ), len(Z_train), should_normalize=True) np.save("delta_Z.npy", delta_Z) else: delta_Z = np.load("delta_Z.npy") # + #print(vc_beta) #print(X_train.shape) #print(init_beta.shape) init_beta_lr = vc_beta.swapaxes(1, 2) init_beta_lr[:, :, -1] += np.random.uniform(0, 1, size=((init_beta_lr.shape[0], init_beta_lr.shape[1]))) print(init_beta_lr.shape) 
gamma = 1e5 lam = 1e-2 beta_target = np.zeros_like(init_beta_lr) rho_beta = lambda beta, i: lam*np.sum([ functions.lasso_penalty(beta[:, j], beta_target[i, :, j])+ functions.l2_penalty(beta[:, j], beta_target[i, :, j]) for j in range(beta.shape[1])]) rho_beta_prime = lambda beta, i: lam*( functions.lasso_derivative(beta, beta_target[i])+ functions.l2_prime(beta, beta_target[i])) # - dmr_lr = DMR_LR(init_beta=init_beta_lr, #vc_beta f=functions.linear_loss_multitask, f_prime=functions.linear_loss_prime_multitask, gamma=gamma, latent_dim=2, n_neighbors=5, update_ztree_every=25, calc_dist_errors_every=1, calc_closest_every=2, rho_beta=rho_beta, rho_beta_prime = rho_beta_prime, init_phi_u=init_phi_u, psi_u=psi_u, psi_u_prime=psi_u_prime, intercept=False, log_dir="./", n_threads=1) import sys sys.setrecursionlimit(100000) z_dmr_lr, b_dmr_lr = dmr_lr.fit( X_train, Y_train, Z_train, dZ, delta_U=delta_Z, init_lr=5e-7, lr_decay=1-1e-4, init_patience=25, verbosity=1, calc_neighbors=False, hierarchical=False) # + beta_hat_dmr_lr = np.tensordot(z_dmr_lr, b_dmr_lr, axes=1) #print(dmr_lr.best_losses_over_time) print(dmr_lr.best_phi_u) print(dmr_lr.best_Z) np.save("beta_har_dmr_lr_z.npy", dmr_lr.best_Z) # - print(X_train.shape) print(Y_train.shape) print(Z_train.shape) print(delta_Z.shape) print(len(dZ)) # 4:20 on 10/8 - Experimenting with extra neighbors: Had MSE < 0.03 with Neighbors = 50. (beta_hat_dmr, phi_beta, phi_u, distances_over_time, losses_over_time) = dmr.fit( X_train, Y_train, Z_train, dZ, delta_U=delta_Z, init_lr=1e-3, tol=1e-3, lr_decay=1-1e-7, init_patience=25, verbosity=1, hierarchical=False) # last experiment - 10x patience. previous result was 91% of MSE of mixture. # Increasing to 25 mixtures seems to help mixtures without changing personalized. 
(beta_hat_dmr, phi_beta, phi_u, distances_over_time, losses_over_time) = dmr.fit( X_train, Y_train, Z_train, dZ, delta_U=delta_Z, init_lr=1e-3, tol=1e-3, lr_decay=1-1e-7, init_patience=25, verbosity=1, hierarchical=False) print(phi_u) plt.matshow(np.abs(beta_hat_dmr[:, 0].T) > 1e-3) plt.matshow(beta_hat_dmr[:, 1].T) plt.matshow(beta_hat_dmr[:, 1].T - beta_hat_dmr[:, 0].T) print(beta_hat_dmr[:, 1].T - beta_hat_dmr[:, 0].T) print(beta_hat_dmr[:, 0].T) print(beta_hat_dmr.shape) beta_hat_dmr_lr = np.swapaxes(beta_hat_dmr_lr, 1, 2) # + #print(Y_test - np.mean(Y_train, axis=0)) #plt.hist(Y_test - np.mean(Y_train, axis=0)) #plt.hist(np.square(Y_test[:, :2] - np.mean(Y_train, axis=0)[:2])) phi_u = dmr_lr.best_phi_u delta_U_train_train = np.ones((len(X_train), len(X_train), K))# - np.eye(len(X_train)) for i in range(len(X_train)): delta_U_train_train[i, i] = np.zeros((K)) calc_train_mse = lambda beta: calc_mse(beta, X_train, Y_train, delta_U_train_train, 1, np.ones_like(phi_u)) base_train_mse, base_preds = calc_train_mse(base) overfit_train_mse, overfit_preds = calc_train_mse(overfit) mix_train_mse, mix_preds = calc_train_mse(mixture_beta) vc_train_mse, vc_preds = calc_train_mse(vc_beta) #dmr_train_mse = calc_train_mse(beta_hat_dmr) dmr_lr_train_mse, dmr_lr_preds = calc_train_mse(beta_hat_dmr_lr) print(overfit_preds) print(dmr_lr_preds) print("===== Train MSEs =====") print("Mean: {:.3f}".format(base_train_mse)) print("Overfit: {:.3f}".format(overfit_train_mse)) print("Mixture: {:.3f}".format(mix_train_mse)) print("Vc: {:.3f}".format(vc_train_mse)) #print("DMR: {}".format(dmr_train_mse)) print("DMR (LR): {:.3f}".format(dmr_lr_train_mse)) #print(dmr_train_mse/vc_train_mse) base_mse, base_preds = calc_mse(base, X_test, Y_test, delta_U_test_train, 1, np.ones_like(phi_u)) overfit_mse, overfit_preds = calc_mse(overfit, X_test, Y_test, delta_U_test_train, 1, np.ones_like(phi_u)) mix_mse, mix_preds = calc_mse(mixture_beta, X_test, Y_test, delta_U_test_train, 1, 
np.ones_like(phi_u)) vc_mse, vc_preds = calc_mse(vc_beta, X_test, Y_test, delta_U_test_train, 1, np.ones_like(phi_u)) #dmr_mse = calc_mse(beta_hat_dmr, X_test, Y_test, delta_U_test_train, 5, phi_u) dmr_lr_mse, dmr_lr_preds = calc_mse(beta_hat_dmr_lr, X_test, Y_test, delta_U_test_train, 2, phi_u) print("===== Test MSEs =====") print("Mean: {:.3f}".format(base_mse)) print("Overfit: {:.3f}".format(overfit_mse)) print("Mixture: {:.3f}".format(mix_mse)) print("Vc: {:.3f}".format(vc_mse)) #print("DMR: {}".format(dmr_mse)) print("DMR (LR): {:.3f}".format(dmr_lr_mse)) #print(dmr_mse/vc_mse) calc_mae = lambda a,b,c,d,e,f: calc_test_err(a,b,c,d,e,f,mse=False, mae=True) calc_test_mae = lambda theta, n, phi: calc_mae(theta, X_test, Y_test, delta_U_test_train, n, phi) base_mae, base_preds = calc_test_mae(base, 1, np.ones_like(phi_u)) overfit_mae, base_preds = calc_test_mae(overfit, 1, np.ones_like(phi_u)) mix_mae, base_preds = calc_test_mae(mixture_beta, 1, np.ones_like(phi_u)) vc_mae, base_preds = calc_test_mae(vc_beta, 1, np.ones_like(phi_u)) #dmr_mae = calc_mae(beta_hat_dmr, X_test, Y_test, delta_U_test_train, 5, phi_u) dmr_lr_mae, base_preds = calc_test_mae(beta_hat_dmr_lr, 2, phi_u) print("\n==== Test MAEs =====") print("Mean: {:.3f}".format(base_mae)) print("Overfit: {:.3f}".format(overfit_mae)) print("Mixture: {:.3f}".format(mix_mae)) print("VC: {:.3f}".format(vc_mae)) #print("DMR: {}".format(dmr_mae)) print("DMR (LR): {:.3f}".format(dmr_lr_mae)) #print(dmr_mae/mix_mae) # - # Sort and display MSE by increasing NN Dist np.save("beta_hat_dmr_lr.npy", beta_hat_dmr_lr) np.save("phi_u.npy", phi_u) np.save("phi_beta.npy", phi_beta) np.save("beta_hat_vc.npy", vc_beta) beta_hat_dmr_lr = np.load("beta_hat_dmr_lr.npy") phi_u = np.load("phi_u.npy") # + #print(np.argsort(np.abs((Z_test[0] - Z_train).dot(phi_u)))) beta_hat_dmr_test = np.array([ np.mean(beta_hat_dmr_lr[np.argsort(np.abs((Z_test[i] - Z_train).dot(phi_u)))[:3]], axis=0) for i in range(len(X_test))]) 
print(beta_hat_dmr_test.shape) # - dmr_preds_train = np.array([ X_train[i].dot(beta_hat_dmr_lr[i].T) for i in range(len(X_train)) ]) dmr_preds_test = np.array([ X_test[i].dot(beta_hat_dmr_test[i].T) for i in range(len(X_test)) ]) dmr_preds_train = np.exp(dmr_preds_train / (1+dmr_preds_train)) dmr_preds_test = np.exp(dmr_preds_test / (1+dmr_preds_test)) print(dmr_preds_train) print(dmr_preds_test) # + a = mean_squared_error(Y_train[:, 0], dmr_preds_train[:, 0]) b = mean_squared_error(Y_train[:, 1], dmr_preds_train[:, 1]) print(a, b, (a+b)/2) a = mean_squared_error(Y_test[:, 0], dmr_preds_test[:, 0]) b = mean_squared_error(Y_test[:, 1], dmr_preds_test[:, 1]) print((a+b)/2) a = r2_score(Y_test[:, 0], dmr_preds_test[:, 0]) b = r2_score(Y_test[:, 1], dmr_preds_test[:, 1]) print((a+b)/2) # + with open("results.txt", 'w') as result_file: print("{}\n".format(dmr_mse), file=result_file) print("{}\n".format(vc_mse), file=result_file) print(beta_hat_dmr.shape) delta_Z_test = dmr.make_covariate_distances( Z_test, dZ, len(dZ), len(Z_test), should_normalize=False) N_test = len(delta_Z_test) delta_Z_test = np.array([np.array([delta_Z_test[i, j].dot(phi_u) for j in range(N_test)]) for i in range(N_test)]) # - nearest_neighbor_dists = np.amin(delta_Z_test, axis=0) print(delta_Z_test) print(nearest_neighbor_dists) plt.hist(nearest_neighbor_dists) print(phi_u) print(phi_beta)
Experiments/Voting/2_fit_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + tags=["parameters"] epochs = 10 # - # # Teil 6 - Federated Learning auf MNIST mit einem CNN # # ## Upgraden zu Federated Learning mit 10 Zeilen PyTorch + PySyft # # # ### Kontext # # Federated Learning ist eine spannende und aufstrebende Machine Learning Technik, die es Systemen ermöglicht auf dezentralisierten Daten zu lernen. Der Grundgedanke dabei ist die Daten an ihrem ursprünglichen Ort zu belassen und dadurch die Privatsphäre des Besitzers zu stärken. Dafür wird jedoch das Machine Learning Model an alle Daten-Besitzer verteilt. Eine direkte Anwendung dafür ist die Vorhersage des nächsten Wortes beim Tippen auf dem Smartphone: die Trainingsdaten - z. B. die gesendeten Nachrichten - sollen hierbei keinesfalls auf einem zentralen Server gesammelt werden. # # Die Verbreitung von Federated Learning ist stark an das Bewusstsein für Daten-Privatsphäre geknüpft. Die DSGVO der EU, welche seit Mai 2018 den Schutz der Daten vorschreibt, kann hierbei als Auslöser angesehen werden. Um einer Regulierung zuvorzukommen, haben große Unternehmen wie Apple oder Google begonnen, stark in diesen Bereich zu investieren und die Privatsphäre der eigenen Nutzer somit zu schützen. Jedoch stellen sie ihre Werkzeuge dafür nicht der Allgemeinheit zur Verfügung. # Bei OpenMined galuben wir daran, dass jeder sein Machine Learning Projekt einfach mit Werkzeugen zum Schutz der Privatsphäre ausstatten können sollte. Aus diesem Grund wurden z. B. [Werkzeuge zum Verschlüsseln der Daten](https://blog.openmined.org/training-cnns-using-spdz/) in einer einzigen Zeile von uns entworfen und nun veröffentlichen wir unser Federated Learning Gerüst, welches auf PyTorch 1.0 aufbaut, um ein intuitives Interface zum Bauen sicherer und skalierbarer Machine Learning Modele anzubieten. 
# # In diesem Tutorial wird direkt mit dem [Beispiel vom Trainieren eines CNN auf MNIST mit PyTorch](https://github.com/pytorch/examples/blob/master/mnist/main.py) gearbeitet. Es wird gezeigt wie einfach es ist dies mit der [PySyft Bibliothek](https://github.com/OpenMined/PySyft/) auf Federated Learning anzupassen. Dabei wird jeder Teil des Beispiels betrachtet und die angepassten Codezeilen hervorgehoben. # # Das Material steht auch auf diesem [Blogpost](https://blog.openmined.org/upgrade-to-federated-learning-in-10-lines) bereit. # # Autoren: # - <NAME> - GitHub: [@LaRiffle](https://github.com/LaRiffle) # # Übersetzer: # - <NAME> - Github: [@JMBehnken](https://github.com/JMBehnken) # # **Ok, lassen Sie uns starten!** # ### Importe und Model Spezifikationen # # Zuerst erfolgen die offiziellen Importe. import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms # Darauf folgen die PySyft spezifischen Importe. Auch werden die Helfer `alice` und `bob` erstellt. import syft as sy # <-- NEW: import the Pysyft library hook = sy.TorchHook(torch) # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning bob = sy.VirtualWorker(hook, id="bob") # <-- NEW: define remote worker bob alice = sy.VirtualWorker(hook, id="alice") # <-- NEW: and alice # Die Einstellungen für das Lernen werden festgelegt. 
# + class Arguments(): def __init__(self): self.batch_size = 64 self.test_batch_size = 1000 self.epochs = epochs self.lr = 0.01 self.momentum = 0.5 self.no_cuda = False self.seed = 1 self.log_interval = 30 self.save_model = False args = Arguments() use_cuda = not args.no_cuda and torch.cuda.is_available() torch.manual_seed(args.seed) device = torch.device("cuda" if use_cuda else "cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} # - # ### Laden der Daten und verteilen an die Helfer # # Zuerst wird der Trainingsdatensatz vorverarbeitet und dann als Federated Datensatz auf alle Helfer mit der `.federate` Methode verteilt. Dieser gebündelte Datensatz wird einem Federated DataLoader übergeben. Der Test-Datensatz bleibt unangetastet. # + federated_train_loader = sy.FederatedDataLoader( # <-- this is now a FederatedDataLoader datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])) .federate((bob, alice)), # <-- NEW: we distribute the dataset across all the workers, it's now a FederatedDataset batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=args.test_batch_size, shuffle=True, **kwargs) # - # ### CNN Spezifikationen # # Hier wird exakt dasselbe CNN Model verwendet wie im offiziellen Beispiel. 
class Net(nn.Module):
    """LeNet-style CNN for MNIST, identical to the official PyTorch example.

    Two conv+maxpool stages followed by two fully connected layers;
    outputs log-probabilities over the 10 digit classes.
    """
    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel (grayscale), 20 feature maps, 5x5 kernels, stride 1
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        # after two conv+pool stages a 28x28 input is reduced to 50 maps of 4x4
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Map a batch of images to per-class log-probabilities."""
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        # flatten the feature maps for the fully connected head
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        # log_softmax pairs with F.nll_loss used in train()/test()
        return F.log_softmax(x, dim=1)

# ### Defining the training and test functions
#
# Because the training data is split across `alice` and `bob`, the training function must also send the model to the right worker for every training batch. All operations are then executed remotely automatically, using the same syntax as when using PyTorch locally. Afterwards the improved model is fetched back, and the value of the loss function can be inspected as well.

def train(args, model, device, federated_train_loader, optimizer, epoch):
    """Run one epoch of federated training.

    For each batch the model is sent to the worker holding that batch,
    trained remotely there, and then fetched back. Logs loss every
    `args.log_interval` batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(federated_train_loader): # <-- now it is a distributed dataset
        model.send(data.location) # <-- NEW: send the model to the right location
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        model.get() # <-- NEW: get the model back
        if batch_idx % args.log_interval == 0:
            loss = loss.get() # <-- NEW: get the loss back (it is a pointer to a remote tensor)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, len(federated_train_loader) * args.batch_size,
                100. * batch_idx / len(federated_train_loader), loss.item()))

# The test function is not modified!
def test(args, model, device, test_loader):
    """Evaluate `model` on the local (non-federated) test set.

    Prints the average NLL loss and the accuracy; returns None.
    `args` is accepted for signature symmetry with train() but is unused here.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

# ### Start the training!

# +
# %%time
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr) # TODO momentum is not supported at the moment

for epoch in range(1, args.epochs + 1):
    train(args, model, device, federated_train_loader, optimizer, epoch)
    test(args, model, device, test_loader)

if (args.save_model):
    torch.save(model.state_dict(), "mnist_cnn.pt")
# -

# Et voilà! A model has just been trained on remote data using Federated Learning!

# ## Final remarks
#
# One important question remains: **How long does Federated Learning take compared to plain PyTorch?**
#
# The pure computation time takes **a little less than twice as long** as it would with plain PyTorch. Measured precisely, it takes 1.9 times as long, which is rather little considering the features that were added.

# ### Star PySyft on GitHub!
#
# The easiest way to help our community is to star the GitHub repos! This helps raise awareness of the cool tools we are building.
#
# - [Star PySyft](https://github.com/OpenMined/PySyft)
#
# ### Use our tutorials on GitHub!
# # Wir haben hilfreiche Tutorials erstellt, um ein Verständnis für Federated und Privacy-Preserving Learning zu entwickeln und zu zeigen wie wir die einzelnen Bausteine weiter entwickeln. # # - [PySyft Tutorials ansehen](https://github.com/OpenMined/PySyft/tree/master/examples/tutorials) # # # ### Mach mit bei Slack! # # Der beste Weg, um über die neuesten Entwicklungen auf dem Laufenden zu bleiben, ist, sich unserer Community anzuschließen! Sie können dies tun, indem Sie das Formular unter [http://slack.openmined.org](http://slack.openmined.org) ausfüllen. # # ### Treten Sie einem Code-Projekt bei! # # Der beste Weg, um zu unserer Community beizutragen, besteht darin, Entwickler zu werden! Sie können jederzeit zur PySyft GitHub Issues-Seite gehen und nach "Projects" filtern. Dies zeigt Ihnen alle Top-Level-Tickets und gibt einen Überblick darüber, an welchen Projekten Sie teilnehmen können! Wenn Sie nicht an einem Projekt teilnehmen möchten, aber ein wenig programmieren möchten, können Sie auch nach weiteren "einmaligen" Miniprojekten suchen, indem Sie nach GitHub-Problemen suchen, die als "good first issue" gekennzeichnet sind. # # - [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject) # - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) # # ### Spenden # # Wenn Sie keine Zeit haben, zu unserer Codebase beizutragen, aber dennoch Unterstützung leisten möchten, können Sie auch Unterstützer unseres Open Collective werden. Alle Spenden fließen in unser Webhosting und andere Community-Ausgaben wie Hackathons und Meetups! # # - [OpenMined's Open Collective Page](https://opencollective.com/openmined)
examples/tutorials/translations/german/Part 06 - Federated Learning on MNIST using a CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="0onGgJHmwaDg" # # **Clasificador de células normales de sangre periférica** # # Interfaz gráfica que nos permita clasificar imágenes de células normales de sangre periférica utilizando cuatro modelos distintos de redes: # # * ResNet18 # # * ResNet34 # # * ViT -Base # # * ViT-Large # # Instrucciones: # # - Selecciona la imagen que quieras clasificar desde el botón *Examinar* # # - Selecciona la red que quieras utilizar para su clasificición en el desplegable. # # - ¡Todo listo! Haz click en *Clasificar* y aparecerá el valor de la predicción y su correspondiente etiqueta. # + colab={"base_uri": "https://localhost:8080/"} id="zegPRFCmyixE" outputId="ce5390f9-4060-432e-ddd9-1c5313142d8a" from fastai.vision.all import * from fastai.vision.widgets import * import os # + colab={"base_uri": "https://localhost:8080/"} id="yGVgtJcxxX2a" outputId="0f48d061-02d8-4803-8d3f-5097b6a8ccab" # Descomprimimos los archivos zip con los datos en el directorio #hide if (os.path.isdir("resnet18")): pass else: # !unzip -qq resnet18.zip if (os.path.isdir("resnet34")): pass else: # !unzip -qq resnet34.zip if (os.path.isdir("vit_base")): pass else: # !unzip -qq vit_base.zip if (os.path.isdir("vit_large")): pass else: # !unzip -qq vit_large.zip # + id="pcHmjlYFxGXd" # Cargamos los resultados para cada uno de los modelos path = Path() path_model = os.path.join(path, 'resnet18') path_18 = os.path.join(path, 'resnet18') learn_inf = load_learner(os.path.join(path_18, 'export.pkl')) # + id="xZwXXlnKyuHQ" path_34 = os.path.join(path, 'resnet34') learn_inf2 = load_learner(os.path.join(path_34, 'export.pkl')) path_12 = os.path.join(path, 'vit_base') learn_inf3 = load_learner(os.path.join(path_12, 'export.pkl')) path_24 = os.path.join(path, 'vit_large') learn_inf4 = 
load_learner(os.path.join(path_34, 'export.pkl')) # + id="hcX9WFHzwOf5" #Definimos los valores de los elemento que vamos a utilizar #hide_output btn_upload = widgets.FileUpload(multiple=False) out_pl = widgets.Output() btn_run = widgets.Button(description='Clasificar') btn_select = widgets.Dropdown( options=[('ResNet18', 18 ), ('ResNet34', 34), ('ViT-Base', 12), ('ViT-Large', 24)] ) lbl_pred = widgets.Label() # + id="yZ<KEY>" def on_click_classify(change): img = PILImage.create(btn_upload.data[-1]) out_pl.clear_output() with out_pl: display(img.to_thumb(128,128)) if btn_select.value == 18: pred,pred_idx,probs = learn_inf.predict(img) elif btn_select.value == 34: pred,pred_idx,probs = learn_inf2.predict(img) elif btn_select.value == 12: pred,pred_idx,probs = learn_inf3.predict(img) else: pred,pred_idx,probs = learn_inf4.predict(img) lbl_pred.value = f'Predicción: {pred}, Probabilidad: {probs[pred_idx]:.04f}' return lbl_pred.value btn_run.on_click(on_click_classify) # + id="kptYUqNcx3To" #hide #Putting back btn_upload to a widget for next cell btn_upload = widgets.FileUpload() # + colab={"base_uri": "https://localhost:8080/", "height": 337, "referenced_widgets": ["140e156008ff4c6c8e519b3a816aca20", "c4e1af99163e4d13bdc847ad0c0d1e64", "80c0a755473641ea8aa83238ce14c532", "b34234e96f924cbc923277a675d6041b", "5060212871a142bfb5341a7a32fd1341", "85508aaf24594ecb9c80a37cb8640bda", "3e14537f73f84bb2bd51019475d5a936", "<KEY>", "102c3453d58a4e83be9e8164dee19bba", "b6d537b377804632b523547e1e44eced", "ab3b6e4af138484780138b7972ca5569", "<KEY>", "1fefbece17184022a1ec5ebb32b5c9a1", "73b9c255b56a43dd903a17198427ed5a", "<KEY>", "49aca987907446a9a1855cde1a91653d", "<KEY>", "60d06394630a44b29fb02ae99c4e1ef3", "50f29482f3c7407da1eec70267ee3d88", "<KEY>", "8928b4abaae0493cb3eaf8f185b63233", "<KEY>"]} id="nX5nnSoLx5_h" outputId="29b2174f-7b09-4f64-cedb-2d736fc51503" from ipywidgets import * #hide_output VBox([widgets.Label('Selecciona tu imagen:'), btn_upload, out_pl, 
widgets.Label('Tipo de red a emplear:'), btn_select, btn_run, lbl_pred])
Interfaz_grafica_completa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Chapter 7<br/> # < Linear Regression w/sklearn > # =============================== from sklearn.datasets import load_boston import matplotlib.pyplot as plt import numpy as np boston = load_boston() # + x_data = boston.data y_data = boston.target.reshape(boston.target.size, 1) x_data[:3] # + from sklearn import preprocessing minmax_scale = preprocessing.MinMaxScaler().fit(x_data) x_scaled_data = minmax_scale.transform(x_data) x_scaled_data[:3] # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(x_scaled_data, y_data, test_size = 0.2) # + from sklearn import linear_model regr = linear_model.LinearRegression(fit_intercept = True, normalize = False, copy_X = True, n_jobs = 8) regr.fit(X_train, y_train) regr # - y_true = y_test y_pred = regr.predict(X_test) np.sqrt(((y_true - y_pred) ** 2).sum() / len(y_true)) # + from sklearn.metrics import mean_squared_error np.sqrt(mean_squared_error(y_true, y_pred)) # - print('Coefficients: ', regr.coef_) print('intercept: ', regr.intercept_) regr.predict(x_data[:5]) x_data[:5].dot(regr.coef_.T) + regr.intercept_ from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error # + y_true = y_test y_hat = regr.predict(X_test) r2_score(y_true, y_hat), mean_absolute_error(y_true, y_hat), mean_squared_error(y_true, y_hat) # + y_true = y_train y_hat = regr.predict(X_train) r2_score(y_true, y_hat), mean_absolute_error(y_true, y_hat), mean_squared_error(y_true, y_hat)
Chapter.7/9.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .sh # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Calysto Bash # language: bash # name: calysto_bash # --- # <!--BOOK_INFORMATION--> # <img align="left" style="padding-right:10px;" src="figuren/book_cover_small.jpg"> Dieses Notebook ist Teil eines Begleit-Kurses zu dem im Springer-Verlag erschienenem Buch [Einführung in Unix/Linux für Naturwissenschaftler](http://www.springer.com/de/book/9783662503003) # # Das Material wird Ihnen unter der [CC BY 4.0 Lizenz](http://creativecommons.org/licenses/by/4.0/deed.de) zur Verfügung gestellt. Wenn Ihnen der Kurs weiterhilft und Sie mich bei der Erstellung weiterer kostenloser Tutorials unterstützen möchten, überlegen Sie sich bitte, [das Buch zu kaufen](http://www.springer.com/de/book/9783662503003). # <!--NAVIGATION--> # < [Vorherige Lektion](04_Shell_Pipelines_und_Filter.ipynb) || [Verständnisfragen zu dieser Lektion](Verstaendnisfragen_zu_Lektion_05_for_Schleife.ipynb) || [Nächste Lektion](06_Shell_Shell-Skripte.ipynb) > # # Die `for`-Schleife # ## Was behandeln wir in diesem Notebook / Lernziele # <div class="alert alert-success"> # # <b>Fragestellung:</b> # <ul> # <li> Wie kann ich existierende <code>Unix</code>-Programme kombinieren, um neue und komplexere Aufgaben zu erledigen?</li> # </ul> # # <b>Aspekte der Fragestellung:</b> # <ul> # <li> Reproduzierbarkeit und Automatisierung von Aufgaben durch Schleifen</li> # </ul> # # <b> Zeitaufwand für diese Lektion: </b> # <ul> # <li> Durcharbeiten des Textes: 30 min</li> # <li> Verständnisfragen: 20 min</li> # </ul> # </div> # ## Schleifen # Schleifen sind ein wichtiges Konstrukt der Programmierung. Sie erlauben es uns, Befehlssequenzen automatisch und reproduzierbar auf jedes Element *einer Liste* anzuwenden. 
Neben den in der letzten Lektion besprochenen Pipelines sind sie der zweite wichtige Baustein, um `Unix`-Kommandos für komplexe Aufgaben zu kombinieren. Sie sind essentiell, um langweilige und sich wiederholende Aufgaben fehlerfrei, effektiv und mit einem Mindestmaß an manueller Intervention durchzuführen. # ## Die `for`-Schleife # Die `bash`-Shell bietet mehrere Schleifenkonstrukte und wir werden uns in diesem Tutorial auf die so genannte `for`-Schleife beschränken. # # Annika arbeitet noch an ihrem Seminar zu den Monden der Planeten unseres Sonnensystems. cd ~/Seminar/Planeten_Daten ls # Sie möchte für jeden Planeten den Mond mit der geringsten Umlaufperiode ermitteln. Mit der Struktur der Planetendateien (siehe [hier](04_Shell_Pipelines_und_Filter.ipynb#Planetendatei_Struktur)) ist dies der Mond mit dem niedrigsten Wert in der vierten Spalte. Um diesen für Jupiter zu erhalten, verwendet Annika die Pipeline # zeige den Mond mit der geringsten Umlaufperiode um Jupiter sort -g -k 4 Jupiter.dat | head -n 1 # Die Option `-k 4` für `sort` sorgt dafür, dass *`sort `die ersten drei Spalten nicht verwendet und erst der Inhalt ab Spalte vier für die Sortierung herangezogen wird*. Dies ist im vorliegenden Fall effektiv eine Sortierung nach der Umlaufzeit der Monde. # # Um diese Aufgabe für jeden Planeten zu lösen, muss obige Pipeline jedes mal mit der entsprechenden Planetendatei neu ausgeführt werden. Das Problem, exakt dieselbe Befehlssequenz mit einer *Variablen* (hier die Planetendateien) wiederholt ausführen zu müssen, ist ein typischer Anwendungsfall für eine Schleife. # # Die allgemeine Struktur der `for`-Schleife ist wie folgt: # ```bash # for VARIABLE in LISTE # do # BEFEHLSSEQUENZ mit ${VARIABLE} # done # ``` # Eine genauere Erklärung folgt weiter unten. # # In unserem konkreten Fall sieht das, zunächst für die zwei Planeten Jupiter und Saturn, wie folgt aus # Schleife, um den Befehl sort -g -k 4 ..... 
| head -n 1 # auf die 'Liste' Jupiter.dat und Saturn.dat anzuwenden # for DATEI in Jupiter.dat Saturn.dat do sort -g -k 4 ${DATEI} | head -n 1 done # Das Schlüsselwort `for` sagt der Shell, dass eine Befehlssequenz für jedes Element einer *Liste* zu wiederholen ist. In jedem Schleifendurchlauf (auch Iteration genannt) wird ein Element der Liste einer *Schleifenvariable* zugeordnet und die Befehle innerhalb der Schleife werden ausgeführt, bevor zum nächsten Listenelement gesprungen wird. Innerhalb der Schleife können wir auf den *Wert* der *Schleifenvariable* mit dem Konstrukt `${...}` zugreifen, wobei `...` für den *Variablennamen* stehen. # # Im konkreten Fall hat die Liste zwei Elemente, die Dateinamen `Jupiter.dat` und `Saturn.dat`. In jedem der zwei Schleifendurchläufe wird ein Dateiname der Schleifenvariable `DATEI` zugewiesen und der `sort`-Befehl ausgeführt. In der ersten Iteration hat die Variable `DATEI` den *Wert* `Jupiter.dat`, auf welchen mit `${DATEI}` zugegriffen wird. Es wird also in der ersten Iteration der Befehl `sort -g -k 4 Jupiter.dat | head -n 1` ausgeführt. Das Ganze wiederholt sich in der zweiten Iteration, wobei der Wert der Variable `DATEI` jetzt `Saturn.dat` ist. Da die Liste im gegebenem Fall nur zwei Elemente hat, endet die Schleife nach zwei Iterationen. # # Der allgemeine Programmfluss einer Schleife und die konkreten Schleifenelemente des aktuellen Beispiels sind noch einmal in folgender Figur dargestellt. # <img src="figuren/Shell_for_Schleife_fig1_und_2.png" style="width: 600px;"> # Wir haben die Schleife bisher für Jupiter und Saturn verwendet. Um sie auf alle Planeten anzuwenden, müssen wir die erforderliche Liste *nicht* manuell erzeugen, sondern wir können sie einfach und effizient mit Wildcards von der Shell generieren lassen! # Wende die Schleife auf alle Dateien mit der Endung '.dat' an. # Hier entspricht dies allen Planetendateien. 
# for DATEI in *.dat do sort -g -k 4 ${DATEI} | head -n 1 done # ## Verschiedenes im Zusammenhang mit der `for`-Schleife # ### Hilfe bei der Schleifenkonstruktion # Anfängern fällt es oft schwer, Schleifen korrekt aufzubauen und damit verbundene Fehler sind nach Schleifendurchführung manchmal schwer zu finden. Eine Hilfe ist es, sich vor der aktuellen Schleifenausführung die Listenelemente und die Schleifenbefehle anzeigen zu lassen und sich so zu vergewissern, dass alles *korrekt* ist. Hierbei hilft das Kommando `echo`, welches unter anderem in Anführungsstrichen eingebetteten Text auf dem Bildschirm ausgeben kann. echo "<NAME>" # gibt einfach '<NAME>' aus # Interessant ist dieser Befehl in Zusammenhang mit Schleifen, da Variablen *vor der Ausgabe* ausgewertet werden. # Die Schleife von oben. Der Schleifenbefehl ist aber in # einen echo-Befehl eingebettet. Er wird dadurch angezeigt, aber # nicht ausgeführt. # for DATEI in *.dat do echo "sort -g -k 4 ${DATEI} | head -n 1" done # Mit diesem Trick kann man sich die Befehle, die die Schleife ausführen würde, erst einmal ansehen, *bevor* sie abgearbeitet werden. Wenn alles in Ordnung ist, entfernt man den `echo`-Befehl samt Anführungszeichen und führt die Schleife aus. # ### Sukzessive Konstruktion einer Textdatei mit einer Schleife # Annika möchte sich die Monde mit den geringsten Umlaufzeiten nicht nur ausgeben lassen, sondern diese in eine Datei `schnelle_Monde.txt` speichern. Um dies zu erreichen, gibt es eine weitere Form der Ausgabeumlenkung, (siehe [die vierte Lektion](04_Shell_Pipelines_und_Filter.ipynb#Ausgabeumlenkung)). Die bisher betrachtete Ausgabeumlenkung leitet die Ausgabe eines Programms in eine Textdatei um. Bei Existenz dieser Datei wird diese allerdings *überschrieben*. echo "Annika" > test.txt # lenke Annika in die Datei test.txt cat test.txt echo "Oliver" > test.txt # lenkt Oliver in Datei test.txt; # Annika wird überschrieben. 
cat test.txt # Es ist also mit dem `>`-Operator nicht möglich, eine Datei schrittweise aufzubauen und zu erweitern. Mit dem zu `>` verwandten Operator `>>` wird ebenfalls die Ausgabe eines Befehls in eine Datei umgelenkt. Falls die Ausgabedatei noch nicht existiert, so haben beide Operatoren denselben Effekt. Existiert die Datei jedoch, so wird die Ausgabe mit `>>` *an die bestehende Datei angehängt*! echo "Annika" > test.txt # lenke Annika in test.txt cat test.txt echo "Oliver" >> test.txt # hängt Oliver an test.txt an cat test.txt # Hiermit kann Annika ihre Datei `schnelle_Monde.txt` erstellen. # Die schnellsten Monde werden alle in eine Datei # geschrieben. # for DATEI in *.dat do sort -g -k 4 ${DATEI} | head -n 1 >> schnelle_Monde.txt done cat schnelle_Monde.txt # ### Variablennamen # Wir haben in unserem Beispiel für den Variablennamen der Schleife `DATEI` gewählt, da es sich bei den Listenelementen um Dateinamen handelt. Es ist jedoch für die Funktionsweise der Schleife egal, welchen Namen wir wählen. So wäre unsere Schleife von eben äquivalent zu folgender Variante for x in *.dat do sort -g -k 4 ${x} | head -n 1 done # Aus Gründen der Lesbarkeit sollte jedoch ein dem Problem angepasster Name gewählt werden. Das hilft Ihnen und anderen, Ihre Schleifen besser und einfacher zu verstehen - vor allem, wenn Sie diese nach längerer Zeit wieder bearbeiten und an ein neues Problem anpassen müssen. Obwohl es nicht zwingend erforderlich ist, hat es sich auch eingebürgert, für Schleifenvariablen nur Großbuchstaben zu verwenden. Hiermit sind sie von anderen Schleifenelementen wie Kommandonamen oder Optionen sofort zu unterscheiden. 
# ### Schleifen in der Shell # Die `for`-Schleife hat `drei` wesentliche Bestendteile: (1) Die Definition `for VARIABLE in LISTE`, was die Schleife einleitet und die Schleifenvariable und die Schleifenliste definiert; (2) Das Schlüsselwort `do`, welches den Schleifenbefehlsblock einleitet samt den eigentlichen Schleifenbefehlen und (3) das Schlüsselwort `done`, welches die Schleife abschließt. Hier in den Notebookzellen können wir diese Bestandteile schön formatiert darstellen, womit die Schleifenstruktur klar wird. Beachten Sie auch, dass wir die Schleifenbefehle etwas *einrücken*, um diesen Befehlsblock klar vom Rest hervorzuheben. Falls wir später neue Befehle in die Schleife einfügen müssen, wird sofort klar, wo dies zu tun ist. Betrachten wir das bisherige Beispiel, welches wir um einen zweiten Schleifenbefehl erweitern. # gut formatierte und übersichtliche Schleife # for DATEI in *.dat do echo "Arbeite an Datei ${DATEI}" sort -g -k 4 ${DATEI} | head -n 1 done # Innerhalb einer `Unix`-Terminalzeile ist es leider nicht ohne Weiteres möglich, eine Schleife so schön formatiert darzustellen. Hier muss alles *innerhalb einer Zeile* untergebracht werden. Dies wird durch Strichpunkte vor dem `do`, das den Befehlsblock einleitet, und nach jedem der Schleifenbefehle erreicht. # innerhalb einer einzigen Unix-Terminalzeile sähe obige # Schleife wie folgt aus. Um alles in einer Zeile zu setzen, # sind Strichpunkte for dem 'do' und nach jedem der # Schleifenbefehle nötig. # for DATEI in *dat; do echo "Arbeite an Datei ${DATEI}"; sort -g -k 4 ${DATEI} | head -n 1; done # Wir erwähnen dies, damit Sie Schleifen in der Terminalzeilenstruktur gegebenenfalls verstehen und analysieren können. Wie Sie auch bei der Arbeit innerhalb eines `Unix`-Terminals ordentlich formatierte Schleifen erstellen und warten können, besprechen wir in der nächsten Lektion. 
# <a id='Statistik_Beobachtungen'></a> # ## Annikas Bachelor-Arbeit: Statistiken aller Beobachtungen # Nachdem Annika bereits überprüft hat, wie vollständig ihre Beobachtungen sind (siehe [hier](04_Shell_Pipelines_und_Filter.ipynb#Annika_Test_Beobachtungen)), möchte sie jetzt die Qualität der vorhandenen Daten testen. cd ~/Bachelor_Arbeit/Beobachtungen ls # Sie hat von ihrem Betreuer das Programm `calc_stats.py` bekommen, welches den Mittelwert und die Standardabweichung einer Beobachtung berechnen kann. Annika testet zuerst, ob dieses Programm funktioniert und ob sie es richtig verwendet. # Berechne Mittwlwert und Standardabweichung einer Beobachtung # im processed Unterverzeichnis # Ausgabe des Programms ist: # Beobachtung Mittelwert Standardabweichung python3 calc_stats.py processed/695833p.txt # Annika weiß, dass sie in jeder Beobachtung einen Mittelwert um 0 und eine Standardabweichung um 1 zu erwarten hat. Sie sieht daher diesen ersten Schritt als erledigt an. Als nächstes möchte sie das Programm mit einer Schleife über *alle* Beobachtungsdateien im Unterverzeichnis `processed` laufen lassen. Da Annika gerade anfängt, `for`-Schleifen zu programmieren und da die Analyse einige Zeit dauern wird, geht sie schrittweise vor. # Als erstes möchte sie nur einen kleinen Teil ihrer Daten betrachten und wählt einen Testdatensatz aus. # Wähle einen Testdatensatz, um die Analyseschleife zu entwickeln und # zu testen; kopiere diese Daten in ein separates test-Verzeichnis mkdir test cp ./processed/69583*p.txt ./test ls test # Jetzt setzt sie eine erste Schleife über die Testdaten auf und überprüft, dass die Schleifenbefehle und die Schleifenliste korrekt sind. # <a id='for-statistik'></a> # Testschleife zur Verifizierung der Listenelemente # und der Schleifenbefehle for BEOBACHTUNG in test/*p.txt do echo "python3 calc_stats.py ${BEOBACHTUNG}" done # Hier sieht alles gut aus und Annika wendet die Analyseschleife auf die Testdaten an. 
# Analyseschleife über die Testdaten # for BEOBACHTUNG in test/*p.txt do python3 calc_stats.py ${BEOBACHTUNG} done # Nachdem auch dies erfolgreich ist, würde Annika als letzten Schritt die Analyseergebnisse der Schleife in eine Ergebnisdatei speichern. # Analyseschleife über die Testdaten # nur Speicherung der Ergebnisse # for BEOBACHTUNG in test/*p.txt do python3 calc_stats.py ${BEOBACHTUNG} >> test_resultat.txt done cat test_resultat.txt # Nach diesen Vorarbeiten kann Annika ihre Schleife guten Gewissens auf den vollen Datensatz anwenden. Sie muss hierzu lediglich noch die Schleifenliste `test/*p.txt` durch `processed/*p.txt` ersetzen. Da dies ein wenig dauern würde, verzichten wir hier darauf. # <div class="alert alert-success"> # <b>Zum Mitnehmen</b> # <ul> # <li> <code>befehl >> datei</code> hängt die Ausgabe von <code>befehl</code> an <code>datei</code> an.</li> # <li> <code>echo "text"</code> Gibt <code>text</code> auf dem Bildschirm aus.</li> # <li> Die <code>for</code>-Schleife kann eine Befehlssequenz auf alle Elemente einer Liste ausführen.</li> # <li> Wählen Sie für wissenschaftliche Daten konsistente Dateinamen, die einfach mit Wildcards anzusprechen sind. Dies macht es Ihnen einfach, sie effizient mit Schleifen zu verarbeiten.</li> # <li> Entwickeln Sie komplizierte Schleifen schrittweise und testen Sie diese mit kleinen Datensätzen.</li> # </ul> # </div> # <!--NAVIGATION--> # < [Vorherige Lektion](04_Shell_Pipelines_und_Filter.ipynb) || [Verständnisfragen zu dieser Lektion](Verstaendnisfragen_zu_Lektion_05_for_Schleife.ipynb) || [Nächste Lektion](06_Shell_Shell-Skripte.ipynb) >
05_Shell_for_Schleife.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="images/logo.jpg" style="display: block; margin-left: auto; margin-right: auto;" alt="לוגו של מיזם לימוד הפייתון. נחש מצויר בצבעי צהוב וכחול, הנע בין האותיות של שם הקורס: לומדים פייתון. הסלוגן המופיע מעל לשם הקורס הוא מיזם חינמי ללימוד תכנות בעברית."> # ## <span style="text-align: right; direction: rtl; float: right; clear: both;">הערות</span> # <p style="text-align: right; direction: rtl; float: right; clear: both;"> # לפעמים נרצה לצרף דברי הסבר לקוד שכתבנו.<br> # כדי להוסיף הסברים לצד הקוד נשתמש בתו סולמית, ומייד אחריה נכתוב את ההסברים שרצינו לצרף.<br> # החלקים האלו בקוד נקראים <dfn>הערות</dfn>, ופייתון מתעלמת מקיומם כשהיא מריצה את הקוד. # </p> # <p style="text-align: right; direction: rtl; float: right; clear: both;"> # לדוגמה: # </p> # This will print "Hello World" print("Hello World") # <p style="text-align: right; direction: rtl; float: right; clear: both;">או אפילו באותה שורה:</p> print("Hello World") # This will print "Hello World" # <p style="text-align: right; direction: rtl; float: right;"> # הערות נועדו להסביר את הקוד שלכם למתכנתים אחרים שיקראו אותו בעתיד.<br> # בינתיים, אנחנו נשתמש בהערות כדי להבהיר ולהדגיש דוגמאות במחברת הקורס.<br> # נהוג לכתוב הערות באנגלית, אבל רק לצורך הקורס שלנו, שמתקיים בעברית, נכופף קצת את החוקים. # </p> # # <span style="text-align: right; direction: rtl; float: right; clear: both;">אריתמטיקה</span> # <p style="text-align: right; direction: rtl; float: right;"> # אין צורך לפחד מהמונח המאיים, מדובר פשוט בפעולות חשבון בסיסיות. בואו נראה מה פייתון יודעת לעשות.<br> # חשבו מה תהיה התוצאה בכל אחד מהתאים הבאים. לאחר שכתבתם בצד את התוצאה שעליה חשבתם, הריצו את התאים ובדקו אם צדקתם. # </p> print(5 + 1) print(5 - 1) print(1 - 5) # פייתון מכיר בשליליים print(5 - 1 / 2) # סדר פעולות חשבון? 
print(5.5 - 1.3) print(1.2 + 3) print(2 * 5) print(5 * 2) print(5 - 2) print(3 ** 2) # כדאי לזכור שכפול פעמיים מסמל חזקה print(2 ** 3) print(5 // 2) # חילוק שלמים (נסו לנחש, מוסבר בהרחבה למטה) print(5 % 2) # השארית מחלוקת השלמים print(5 * (6 / (2 * 5))) # אפשר גם סוגריים, אם צריך # ## <span style="text-align: right; direction: rtl; float: right; clear: both;">חלוקת שלמים ושארית</span> # ### <span style="text-align: right; direction: rtl; float: right; clear: both;">חלוקת שלמים</span> # <p style="text-align: right; direction: rtl; float: right;"><dfn>חלוקת שלמים</dfn> היא פעולה שתוצאתה תמיד מספר שלם, בניגוד לפעולת החילוק שאנחנו רגילים אליה (<code>/</code>).</p> # # <p style="text-align: right; direction: rtl; float: right;">כאשר אנחנו עושים חלוקת שלמים מהצורה <code>A // B</code>, אנחנו למעשה מתכוונים לשאול:</p> # # <blockquote style="text-align: left; direction: ltr; float: right; border-left: 0; border-right: 5px solid #eeeeee;"><p style="text-align: right; direction: rtl; float: left;">אם יש לי A משולשי פיצה, וחילקתי את כל משלושי הפיצה באופן שווה ל־B ילדים (תזהרו מלחתוך לי את המשולשים!), כמה משולשי פיצה יקבל כל ילד?</p></blockquote> # # <p style="text-align: right; direction: rtl; float: right;">לדוגמה: הביטוי <code>4 // 9</code>, אומר שיש לנו 9 משולשי פיצה ו־4 ילדים רעבים.<br>אם נחלק את משולשי הפיצה בין הילדים, נגלה שכל ילד יקבל 2 משולשים, ושנשאר משולש אחד שלא ניתן לחלק.</p> print(9 // 4) # ### <div style="text-align: right; direction: rtl; float: right;">שארית</div> # <p style="text-align: right; direction: rtl; float: right;">למשולשי הפיצה שלא הצלחנו לחלק לילדים אנחנו קוראים <dfn>שארית</dfn>, וזו התוצאה שנקבל כשנכתוב את הסימן <code>%</code> במקום <code>//</code>.<br>לדוגמה, אם היינו כותבים <code>4 % 9</code>, היינו מקבלים <code>1</code>, הרי הוא משולש הפיצה שנשאר מהדוגמה שלמעלה.</p> print(9 % 4) # <div class="align-center" style="display: flex; text-align: right; direction: rtl;"> # <div style="display: flex; width: 10%; float: right; "> # <img 
src="images/tip.png" style="height: 50px !important;" alt="טיפ!"> # </div> # <div style="width: 90%;"> # עבור מספרים חיוביים, תוכלו להמיר את החלוקה ה"רגילה" מהצורה # <div style="display: inline-flex; direction: ltr;">$\normalsize{\frac{9}{4}}$</div> # לשבר מעורב מהצורה # <div style="display: inline-flex; direction: ltr;">$\normalsize{2\frac{1}{4}}$</div>. # <br> # המספר השלם שהתקבל # <div style="display: inline-flex; direction: ltr;">($\normalsize{2}$)</div> # הוא תוצאת חילוק השלמים, בעוד שהמספר במונה # <div style="display: inline-flex; direction: ltr;">($\normalsize{1}$)</div> # הוא השארית. # </div> # </div> # ## <span style="align: right; direction: rtl; float: right;">תרגול: תה אמריקה</span> # <p style="text-align: right; direction: rtl; float: right;"> # חובבי תה רציניים ידעו להגיד לכם, שחליטת התה שלכם צריכה להיעשות בטמפרטורה מסוימת מאוד, שתלויה בסוג התה שבידכם.<br> # כפלצן תה גדול, אני מזמין לעיתים קרובות תה ממחוזות מוזרים בעולם, שבהם מציגים את הטמפרטורה המושלמת לחליטת התה במעלות פרנהייט במקום בצלזיוס.<br> # לאחרונה קיבלתי תה יסמין שהיה צריך לחלוט ב־176 מעלות פרנהייט.<br> # הדלקתי קומקום ועקבתי אחרי הטמפרטורה של המים. עזרו לי, והדפיסו את הטמפרטורה שאליה המים צריכים להגיע, במעלות <em>צלזיוס</em>. # </p> # # <p style="text-align: right; direction: rtl; float: right;"> # הנוסחה להמרה מפרנהייט לצלזיוס היא <code>(5 חלקי 9) כפול (מעלות בפרנהייט פחות 32)</code>.<br> # או, אם אתם מעדיפים כתיב מתמטי: <span style="display: inline-flex; direction: ltr;">$\normalsize{C = \frac{5}{9}\times(F - 32)}$</span><br> # לדוגמה, אם הייתי צריך לחלוט את התה שלי ב־212 מעלות פרנהייט, הייתי מצפה שתציגו לי <samp>100</samp>, כיוון ש: <span style="display: inline-flex; direction: ltr;">$\normalsize{C = \frac{5}{9}\times(212 - 32)} = 100$</span> # </p> # + # הקלידו את הקוד שלכם כאן!
week01/2_Arithmetics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Galaxy Mergers # The project was originally developed by <NAME> and modified by <NAME>. The idea for this project came from <NAME>, who visited our department to give a colloquium on his research in Winter 2014. All content is licensed under the MIT License. # --- # ## Introduction # One of the fundamental questions in astrophysics is how galaxies form, evolve and interact. It might seem surprising that galaxies separated by vast distances could interact with one another, but their immense masses and the gravitational forces that govern their formation and evolution nevertheless can lead to violent interactions. The Milky Way galaxy is right now on a collision course with the Andromeda galaxy and they are predicted to collide in about 4 billion years. Even though both galaxies are made up of hundreds of billions of stars, the separation between stars in the galaxies means that the chances of direct stellar collisions are extremely small. Nevertheless, if our descendents are still around to experience it, the view of our night sky will definitely change as a result of this interaction. # Here is a computer simulation of the merger generated using data from the Hubble Space Telescope and our knowledge of the gravitational interaction among the constituents. from IPython.display import YouTubeVideo YouTubeVideo('4disyKG7XtU') # So how does one go about simulating the collision of two galaxies? The video above was probably generated on massive supercomputers running full blast for a very long time to generate the paths of all of the individual stars as the galaxies merged. Is it really possible that we could generate something like that using the programming skills you have learned? The answer is yes, if we make some simplifying assumptions. 
# # In this project you will create a simulation of galaxy mergers using the methods of Toomre and Toomre, two brothers who used state of the art computers at MIT and NYU in 1972 to investigate the dynamics of massless point particles orbiting a massive galactic nucleus $M$ in a parabolic orbit about the center of mass with another massive galactic nucleus $S$. Their work was an extension of a previous paper published in German in 1963 describing the system of equations and an early attempt to investigate how the spiral structure of galaxies emerges. # [Toomre and Toomre's 1972 paper](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1972ApJ...178..623T&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf) on the simulation of galaxy mergers with Newtonian mechanics. # # [Pfleiderer 1963](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1963ZA.....58...12P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf) is in German, but includes the equations and the set up of the problem. <NAME> translated the relevant parts of the paper to come up with a description of the equations and variables that were used by Pfeliderer and the Toomres to do their simulation. # ## The equations # This is a restricted 3-body problem in which particles composing the outer disk of galaxy $M$ are massless but nevertheless interact through inverse square laws with the mass centers of their galactic central mass ($M$) and the central mass of the disrupting galaxy ($S$). # The calculation is performed in the rest frame of the mass $M$ lying at the origin, with the starting positions for the massless point particles $m_i$ (stars) in the orbits around it given by $(\boldsymbol{r_0})_i$ and the position of mass $S$ relative to $M$ given by $\boldsymbol{\Re}$. 
The evolution of the positions of the stars $m_i$ and galaxy $S$ relative to $M$ is dictated by the set of differential equations: # $$ \ddot{\mathbf{r}} = -\gamma \left\{ \frac{M}{r^3}\mathbf{r} -\frac{S}{\rho^3}\boldsymbol{\rho} + \frac{S}{R^3}\boldsymbol\Re \right\} $$ # # $$ \ddot{\boldsymbol\Re} = -\gamma \frac{M+S}{R^3}\boldsymbol\Re$$ # where # # * $\gamma$ is the Gravitational constant. # * $M$ is the central mass of the main galaxy and $S$ is the central mass of the disrupting galaxy # * $\mathbf{r}$ is the radius vector from mass $M$ to massless point particle $m$, representing a single (massless) star in the outer disk of the main galaxy. # * $\boldsymbol\Re$ is the radius vector from $M$ to $S$ # * $\boldsymbol{\rho} = \boldsymbol{\Re} - \boldsymbol{r}$ # One starts with a position and velocity vector for $S$, and the $m_i$ (remember that we are in the rest frame of $M$ so it is at rest at the origin of the coordinate system) and then solves these differential equations to get the positions of $S$, and the set of $m_i$ ($i$ = 0,...,$N$) as a function of time under the influence of pure Newtonian gravity. # # After the equations are solved, one can either draw static images of the system at specific points in time, or form an animated movie of the interaction. For this project, you will do both. # # In the image from Toomre and Toomre shown below, they transformed to the center-of-mass of the $M$+$S$ system for the first 4 frames and then switched back to the rest frame of $M$ for the six subsequent frames. # # <img src="toomre_toomre_fig1.png"> # Section II of the paper describes exactly the algorithm used to compute the results, followed by four examples. # ## The Project # The original paper describes four examples: # # * Direct passage # * Retrograde passage # * Light mass disruptor # * Heavy mass disruptor # # You should pick one/two of these cases as your **base question**. 
# # Pfleiderer's paper lists a variety of interactions and initial conditions, including elliptical orbits for the point masses, from which the Toomre examples are a subset. Your **two additional questions** should come from this list of initial conditions.
#
# For all initial conditions studied you should have:
#
# * Static visualizations.
# * Animations/movies.
# * Multiple perspectives, both the center-of-mass of the system and the rest frames of the two interacting galaxies $M$ and $S$.

# <table>
# <tr><th> Type</th><th>Position $X$</th><th>Position $Y$</th><th>Velocity $U$</th><th>Velocity $V$</th><th> Eccentricity $\epsilon_s$</th><th> Mass ratio $S/M$</th></tr>
# <tr><td>S1+</td><td>1</td><td>0</td><td>&gt; 0</td><td>0</td><td>1</td><td>1</td></tr>
# <tr><td>S1-</td><td>1</td><td>0</td><td>&lt; 0</td><td>0</td><td>1</td><td>1</td></tr>
# <tr><td>S2+</td><td>1</td><td>0</td><td>&gt; 0</td><td>0</td><td>7</td><td>1</td></tr>
# <tr><td>S2-</td><td>1</td><td>0</td><td>&lt; 0</td><td>0</td><td>7</td><td>1</td></tr>
# <tr><td>S3+</td><td>1</td><td>0</td><td>&gt; 0</td><td>0</td><td>31</td><td>3</td></tr>
# <tr><td>S3-</td><td>1</td><td>0</td><td>&lt; 0</td><td>0</td><td>31</td><td>3</td></tr>
# <tr><td>S4</td><td>1</td><td>0</td><td>0</td><td>&gt; 0</td><td>31</td><td>3</td></tr>
# <tr><td>S5</td><td>1</td><td>0</td><td>0</td><td>&gt; 0</td><td>1</td><td>1</td></tr>
# <tr><td>S6</td><td>0</td><td>1</td><td>&gt; 0</td><td>0</td><td>31</td><td>3</td></tr>
# <tr><td>S7</td><td>0</td><td>1</td><td>&gt; 0</td><td>0</td><td>1</td><td>1</td></tr>
# </table>
#
# ## Implementation advice

# The exercises related to Ordinary Differential Equations are useful guidance for how to set up your solution. Start with a single point mass orbiting the main galaxy and its interactions with the disrupting galaxy. Once you have it working for a single test mass, add in more until you have a complete solution.
# # You will need to tune the error tolerances of `odeint` to ensure that you are accurately solving the equations of motion. One good way of making sure you have the errors under control is to compute the total energy of the system (which should be constant). This will require a bit of derivation, but would be very worth your time. # # One of the main challenges you will face is how to perform animations of your results. Here are some thoughts on how to do that: # # * Perform your simulations separate from the visualizations and animations. Save the solution arrays for each simulation to disk (in the `npz` format). You should come up with a naming scheme for your files that makes it clear what parameters were used for each simulation. You may even want to save those parameters into a JSON file with the same naming scheme. # * Generate visualizations and animations from data that is saved to disk. # # I will provide you with some additional information about how to create animations using Matplotlib.
assignments/project/GalaxyMergers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import tensorflow as tf
import functools


# Creates a tf feature spec from the dataframe and columns specified.
def create_feature_spec(df, columns=None):
    """Return a {column_name: tf.FixedLenFeature} parsing spec for `df`.

    int64 columns map to tf.int64 features, float64 columns to tf.float32
    (tf.Example stores floats as 32-bit), and everything else to tf.string.

    Args:
        df: pandas DataFrame whose column dtypes drive the spec.
        columns: optional list of column names; defaults to all columns.
    """
    feature_spec = {}
    if columns is None:  # fixed: was `columns == None` (PEP 8: identity test for None)
        columns = df.columns.values.tolist()
    for f in columns:
        # fixed: compare dtypes with `==` instead of `is`; the identity check
        # only happened to work because NumPy caches built-in dtype objects --
        # equality is the documented way to compare dtypes.
        if df[f].dtype == np.dtype(np.int64):
            feature_spec[f] = tf.FixedLenFeature(shape=(), dtype=tf.int64)
        elif df[f].dtype == np.dtype(np.float64):
            feature_spec[f] = tf.FixedLenFeature(shape=(), dtype=tf.float32)
        else:
            feature_spec[f] = tf.FixedLenFeature(shape=(), dtype=tf.string)
    return feature_spec

# Creates simple numeric and categorical feature columns from a feature spec and a
# list of columns from that spec to use.
#
# NOTE: Models might perform better with some feature engineering such as bucketed
# numeric columns and hash-bucket/embedding columns for categorical features.
def create_feature_columns(columns, feature_spec):
    """Create numeric/indicator feature columns for the given spec entries.

    NOTE(review): vocabulary lists are read from the module-level `df`
    defined later in this notebook, not from a parameter — confirm the
    intended dataframe before reusing this helper elsewhere.
    """
    ret = []
    for col in columns:
        # Fixed: was two chained `is` tests on tf dtypes; membership with `==`
        # semantics is the documented comparison for tf.DType.
        if feature_spec[col].dtype in (tf.int64, tf.float32):
            ret.append(tf.feature_column.numeric_column(col))
        else:
            ret.append(tf.feature_column.indicator_column(
                tf.feature_column.categorical_column_with_vocabulary_list(
                    col, list(df[col].unique()))))
    return ret


# An input function for providing input to a model from tf.Examples
def tfexamples_input_fn(examples, feature_spec, label, mode=tf.estimator.ModeKeys.EVAL,
                        num_epochs=None, batch_size=64):
    """Yield batched (features, label) pairs parsed from serialized tf.Examples."""
    def ex_generator():
        for i in range(len(examples)):
            yield examples[i].SerializeToString()
    dataset = tf.data.Dataset.from_generator(
        ex_generator, tf.dtypes.string, tf.TensorShape([]))
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.shuffle(buffer_size=2 * batch_size + 1)
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(lambda tf_example: parse_tf_example(tf_example, label, feature_spec))
    dataset = dataset.repeat(num_epochs)
    return dataset


# Parses Tf.Example protos into features for the input function.
def parse_tf_example(example_proto, label, feature_spec):
    """Parse a batch of serialized tf.Examples and split off the label column."""
    parsed_features = tf.parse_example(serialized=example_proto, features=feature_spec)
    target = parsed_features.pop(label)
    return parsed_features, target


# Converts a dataframe into a list of tf.Example protos.
def df_to_examples(df, columns=None):
    """Serialize each row of *df* (restricted to *columns*) into a tf.Example."""
    examples = []
    if columns is None:  # fixed: was `columns == None`
        columns = df.columns.values.tolist()
    for index, row in df.iterrows():
        example = tf.train.Example()
        for col in columns:
            # Fixed: dtype comparison uses `==` rather than `is` (see create_feature_spec).
            if df[col].dtype == np.dtype(np.int64):
                example.features.feature[col].int64_list.value.append(int(row[col]))
            elif df[col].dtype == np.dtype(np.float64):
                example.features.feature[col].float_list.value.append(row[col])
            elif row[col] == row[col]:  # NaN != NaN, so this silently skips missing strings
                example.features.feature[col].bytes_list.value.append(row[col].encode('utf-8'))
        examples.append(example)
    return examples


# Converts a dataframe column into a column of 0's and 1's based on the provided test.
# Used to force label columns to be numeric for binary classification using a TF estimator.
def make_label_column_numeric(df, label_column, test):
    """Binarize df[label_column] in place: 1 where *test* holds, else 0."""
    df[label_column] = np.where(test(df[label_column]), 1, 0)

# +
import pandas as pd

# Load the COMPAS dataset used in the ProPublica recidivism analysis.
df = pd.read_csv('https://storage.googleapis.com/what-if-tool-resources/computefest2019/cox-violent-parsed_filt.csv')
df

# +
#@title Specify input columns and column to predict {display-mode: "form"}
import numpy as np

# Drop rows with no recidivism indication or no COMPAS score.
df = df[df['is_recid'] != -1]
df = df[df['decile_score'] != -1]

# Give the recidivism indicator a descriptive column name.
df['recidivism_within_2_years'] = df['is_recid']

# Binary model label: 0 for a "Low" COMPAS score text, 1 otherwise.
df['COMPASS_determination'] = np.where(df['score_text'] == 'Low', 0, 1)

# Column the model will predict.
label_column = 'COMPASS_determination'

# Columns used as model inputs, and the superset written to the example file.
input_features = ['sex', 'age', 'race', 'priors_count', 'juv_fel_count',
                  'juv_misd_count', 'juv_other_count']
features_and_labels = input_features + [label_column]
features_for_file = input_features + ['recidivism_within_2_years', 'COMPASS_determination']
# -

examples = df_to_examples(df, features_for_file)

# +
num_steps = 2000  #@param {type: "number"}
tf.logging.set_verbosity(tf.logging.DEBUG)

# Parsing spec for the classifier's inputs and label.
feature_spec = create_feature_spec(df, features_and_labels)

# Define and train a linear classifier over the serialized examples.
train_inpf = functools.partial(tfexamples_input_fn, examples, feature_spec, label_column)
classifier = tf.estimator.LinearClassifier(
    feature_columns=create_feature_columns(input_features, feature_spec))
classifier.train(train_inpf, steps=num_steps)
# -

# ### What-If Tool analysis
#
# We can see the same unfairness that ProPublica found in their analysis by:
# 1. Going to the "Performance + Fairness" tab
# 2. Setting "Ground Truth Feature" to "recidivism_within_2_years"
# 3. Click "Optimize Threshold"
# 4.
# In the "Slice by" dropdown menu, select "race"
# 5. Look at the confusion matrices of the "African-American" and "Caucasian" slices.
#    - They have very similar accuracy (TP+FP)
#    - But, the FP rate is MUCH higher for African Americans and the FN rate is MUCH lower for Caucasians

# +
num_datapoints = 10000  #@param {type: "number"}
tool_height_in_px = 1000  #@param {type: "number"}

from witwidget.notebook.visualization import WitConfigBuilder
from witwidget.notebook.visualization import WitWidget

# Hand the first num_datapoints examples and the trained classifier to the What-If Tool.
config_builder = WitConfigBuilder(examples[0:num_datapoints]).set_estimator_and_feature_spec(
    classifier, feature_spec)
WitWidget(config_builder, height=tool_height_in_px)
# -
main/nbs/poc/wit_tutorials/compas_recidivism_classifier/WIT COMPAS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="rln-I3USxo5F" colab_type="text"
# Created by <NAME>
# ## No installation required. All the blocks of code are implemented on Google Colab (colab.research.google.com)
# The reasons for using Google Colab are (1) it is free of charge, (2) it does not require you to install any further libraries on your system (which can be troublesome) and (3) it provides 12 consecutive hours of free GPU.
#
# ### Instructions:
# 1. Go to colab.research.google.com (sign in with your gmail account)
# 2. Upload this notebook to your working repository by clicking File - Upload notebook
# 3. Change runtime: in order to use the free GPU on Google Colab, go to Runtime - Change runtime type - set Hardware accelerator to GPU
# 4. Go through the following sections step by step to achieve the results in the paper.

# + [markdown] colab_type="text" id="PGewKfSe5Y5k"
# ## With VMD

# + [markdown] colab_type="text" id="30nlXKoX5Y5m"
# ### Setup environment, import libraries

# + colab_type="code" id="x593rSdj5Y5m"
import tensorflow as tf
import pandas as pd
import numpy as np
import os
from collections import Counter
import matplotlib
import matplotlib.pyplot as plt
import random
import shutil
from google.colab import files
from matplotlib.pyplot import figure
from sklearn.model_selection import train_test_split

# Fix the NumPy random seed so that every run of the code yields the same results.
from numpy.random import seed
seed(1611)
# Fix the TensorFlow random seed for the same reason (set in the next statement).
tf.random.set_seed(3116)

# Clone the companion repository (Jupyter shell/line magics, commented out by jupytext).
# !git clone https://github.com/a11to1n3/StatFilterAppliedInSTLFDeeplearning.git
# Change to the repository folder
# %cd StatFilterAppliedInSTLFDeeplearning

# Import all the necessary modules in the cloned repository.
import dataWrangling
import dataPCA
import dataHourSplit
import dataFilter
import confidenceLevelFitness
import LSTMModelBuilder
import CNNLSTMModelBuilder
import wavenet
import RBMDAE
import waveletFunction
import modelBuilder

# !pip install vmdpy
from vmdpy import VMD

# + [markdown]
# ### Load dataset

# +
# Run this and select the DayMarked.csv file on your computer.
files.upload()

# + [markdown]
# ### Pre-process dataset to 133 dimensions and scale it

# +
wrangledData = dataWrangling.csvToArray('DayMarkedNYISOO.csv')
# Check the shape of the array after wrangling the data.
print("The shape of the array is: {}".format(np.shape(wrangledData)))
data_scaled, data_orig = dataPCA.diffandScaleISONE(wrangledData)

# + [markdown]
# ### Search confidence level

# +
# Filter the scaled data at each candidate confidence level (90% up to 4.5 sigma).
data_90 = dataFilter.filterWithConfidenceLevel(data_scaled, 90)
data_91 = dataFilter.filterWithConfidenceLevel(data_scaled, 91)
data_92 = dataFilter.filterWithConfidenceLevel(data_scaled, 92)
data_93 = dataFilter.filterWithConfidenceLevel(data_scaled, 93)
data_94 = dataFilter.filterWithConfidenceLevel(data_scaled, 94)
data_95 = dataFilter.filterWithConfidenceLevel(data_scaled, 95)
data_96 = dataFilter.filterWithConfidenceLevel(data_scaled, 96)
data_97 = dataFilter.filterWithConfidenceLevel(data_scaled, 97)
data_98 = dataFilter.filterWithConfidenceLevel(data_scaled, 98)
data_99 = dataFilter.filterWithConfidenceLevel(data_scaled, 99)
data_3sigma = dataFilter.filterWithConfidenceLevel(data_scaled, 99.73)
data_4sigma = dataFilter.filterWithConfidenceLevel(data_scaled, 99.99366)
data_4sigma5 = dataFilter.filterWithConfidenceLevel(data_scaled, 99.99932)

# + [markdown]
# ### Data preparation for the main forecasting algorithm

# + [markdown]
# #### With filter

# + [markdown]
# ##### Do VMD

# + [markdown]
# - VMD in this implementation is taken from the `vmdpy` library, built by the authors of the VMD method.
# - VMD takes six parameters: `alpha` (moderate bandwidth constraint — empirically twice the number of
#   training samples, i.e. $2*T_S$), `tau` (noise tolerance; 0 here so noise is not tightly fit),
#   `K` (number of decomposed modes; 5 was the empirical turning point between good and bad results),
#   `DC` (whether to impose a DC component; False to keep the modes smooth), `init` (omega
#   initialization: 0 zeros, 1 uniform, 2 random; 1 here) and `tol` (convergence tolerance, typically 1e-6).

# +
#. some sample parameters for VMD
alpha = 2*len(data_95[:-int(0.1*len(data_95)),:,0].reshape(-1))   # moderate bandwidth constraint
tau = 0.            # noise-tolerance (no strict fidelity enforcement)
K = 5               # 5 modes
DC = False          # no DC part imposed
init = 1            # initialize omegas uniformly
tol = 1e-6

u, u_hat, omega = VMD(data_95[:-int(0.1*len(data_95)),:,0].reshape(-1), alpha, tau, K, DC, init, tol)

# +
# Plot each of the five decomposed modes.
for comp in range(5):
    print("Component {}".format(comp + 1))
    plt.plot(u.T[:, comp])
    plt.show()

# +
import copy

# Split into 5 subsets: copy the exogenous channels, substitute each VMD mode as channel 0.
# NOTE(review): the VMD above ran on data_95 but the subsets below are built from
# data_scaled — confirm this mismatch against the original experiment design.
_subsets = []
for _mode in range(5):
    _sub = np.zeros_like(data_scaled[:-int(0.1*len(data_scaled))])
    _sub[:,:,1:] = copy.deepcopy(data_scaled[:-int(0.1*len(data_scaled)),:,1:])
    _sub[:,:,0] = u[_mode,:].reshape(-1,24)
    _subsets.append(_sub)
data_scaled1, data_scaled2, data_scaled3, data_scaled4, data_scaled5 = _subsets
print(data_scaled1.shape)

# + [markdown]
# #### No filter

# + [markdown]
# ##### Do VMD

# + [markdown]
# Same VMD settings as above, now applied to the unfiltered scaled data (see the
# parameter discussion in the "With filter" section).

# +
#. some sample parameters for VMD
alpha = 2*len(data_scaled[:-int(0.1*len(data_scaled)),:,0].reshape(-1))   # moderate bandwidth constraint
tau = 0.            # noise-tolerance (no strict fidelity enforcement)
K = 5               # 5 modes
DC = False          # no DC part imposed
init = 1            # initialize omegas uniformly
tol = 1e-6
u, u_hat, omega = VMD(data_scaled[:-int(0.1*len(data_scaled)),:,0].reshape(-1), alpha, tau, K, DC, init, tol)

# +
# Plot each of the five decomposed modes.
for comp in range(5):
    print("Component {}".format(comp + 1))
    plt.plot(u.T[:, comp])
    plt.show()

# +
import copy

# Rebuild the 5 subsets from the unfiltered decomposition.
# NOTE(review): this overwrites the data_scaled1..data_scaled5 built in the
# "With filter" section, so the training below uses the unfiltered modes — verify.
_subsets = []
for _mode in range(5):
    _sub = np.zeros_like(data_scaled[:-int(0.1*len(data_scaled))])
    _sub[:,:,1:] = copy.deepcopy(data_scaled[:-int(0.1*len(data_scaled)),:,1:])
    _sub[:,:,0] = u[_mode,:].reshape(-1,24)
    _subsets.append(_sub)
data_scaled1, data_scaled2, data_scaled3, data_scaled4, data_scaled5 = _subsets
print(data_scaled1.shape)

# + [markdown]
# ### Model preparation

# + [markdown]
# #### LSTM

# +
# One-day-ahead pairs: inputs are days t, targets are days t+1 (last 10% held out).
inp1 = data_scaled1[:-int(0.1*len(data_scaled1))-1]
out1 = data_scaled1[1:-int(0.1*len(data_scaled1))]
inp2 = data_scaled2[:-int(0.1*len(data_scaled2))-1]
out2 = data_scaled2[1:-int(0.1*len(data_scaled2))]
inp3 = data_scaled3[:-int(0.1*len(data_scaled3))-1]
out3 = data_scaled3[1:-int(0.1*len(data_scaled3))]
inp4 = data_scaled4[:-int(0.1*len(data_scaled4))-1]
out4 = data_scaled4[1:-int(0.1*len(data_scaled4))]
inp5 = data_scaled5[:-int(0.1*len(data_scaled5))-1]
out5 = data_scaled5[1:-int(0.1*len(data_scaled5))]

# +
# for pure keras: chronological 67/33 split, batched tf.data pipelines (mode 1).
x_train1, x_val1, y_train1, y_val1 = train_test_split(inp1, out1, random_state=42, test_size=0.33, shuffle=False)
BATCH_SIZE = 4
BUFFER_SIZE = 1000
train1 = tf.data.Dataset.from_tensor_slices((x_train1, y_train1[:,:,0]))
train1 = train1.cache().batch(BATCH_SIZE).repeat()
val1 = tf.data.Dataset.from_tensor_slices((x_val1, y_val1[:,:,0]))
val1 = val1.batch(BATCH_SIZE).repeat().prefetch(1)

# +
model1 = LSTMModelBuilder.build(data_scaled1.shape[1], data_scaled1.shape[2])
model1.summary()

# +
# Same pipeline for mode 2.
x_train2, x_val2, y_train2, y_val2 = train_test_split(inp2, out2, random_state=42, test_size=0.33, shuffle=False)
BATCH_SIZE = 4
BUFFER_SIZE = 1000
train2 = tf.data.Dataset.from_tensor_slices((x_train2, y_train2[:,:,0]))
train2 = train2.cache().batch(BATCH_SIZE).repeat()
val2 = tf.data.Dataset.from_tensor_slices((x_val2, y_val2[:,:,0]))
val2 = val2.batch(BATCH_SIZE).repeat().prefetch(1)

# +
model2 = LSTMModelBuilder.build(data_scaled2.shape[1], data_scaled2.shape[2])
model2.summary()

# +
# Same pipeline for mode 3.
x_train3, x_val3, y_train3, y_val3 = train_test_split(inp3, out3, random_state=42, test_size=0.33, shuffle=False)
BATCH_SIZE = 4
BUFFER_SIZE = 1000
train3 = tf.data.Dataset.from_tensor_slices((x_train3, y_train3[:,:,0]))
train3 = train3.cache().batch(BATCH_SIZE).repeat()
val3 = tf.data.Dataset.from_tensor_slices((x_val3, y_val3[:,:,0]))
val3 = val3.batch(BATCH_SIZE).repeat().prefetch(1)

# +
model3 = LSTMModelBuilder.build(data_scaled3.shape[1], data_scaled3.shape[2])
model3.summary()

# +
# Same pipeline for mode 4.
x_train4, x_val4, y_train4, y_val4 = train_test_split(inp4, out4, random_state=42, test_size=0.33, shuffle=False)
BATCH_SIZE = 4
BUFFER_SIZE = 1000
train4 = tf.data.Dataset.from_tensor_slices((x_train4, y_train4[:,:,0]))
train4 = train4.cache().batch(BATCH_SIZE).repeat()
val4 = tf.data.Dataset.from_tensor_slices((x_val4, y_val4[:,:,0]))
val4 = val4.batch(BATCH_SIZE).repeat().prefetch(1)

# +
model4 = LSTMModelBuilder.build(data_scaled4.shape[1], data_scaled4.shape[2])
model4.summary()

# +
# Same pipeline for mode 5.
x_train5, x_val5, y_train5, y_val5 = train_test_split(inp5, out5, random_state=42, test_size=0.33, shuffle=False)
BATCH_SIZE = 4
BUFFER_SIZE = 1000
train5 = tf.data.Dataset.from_tensor_slices((x_train5, y_train5[:,:,0]))
train5 = train5.cache().batch(BATCH_SIZE).repeat()
val5 = tf.data.Dataset.from_tensor_slices((x_val5, y_val5[:,:,0]))
val5 = val5.batch(BATCH_SIZE).repeat().prefetch(1)

# +
model5 = LSTMModelBuilder.build(data_scaled5.shape[1], data_scaled5.shape[2])
model5.summary()

# + [markdown]
# ##### Training

# + [markdown]
# ###### With filter

# +
# (The original training cells each carried a large commented-out per-epoch evaluation
# callback; that dead code has been removed for readability.)
import datetime


def scheduler(epoch):
    # 0.01 for the 5 warm-up epochs, then exponential decay.
    if epoch < 5:
        return 0.01
    else:
        return 0.001 * np.math.exp(0.1 * (1 - epoch))


log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model1.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse','mape'])
# fit network
history = model1.fit(train1, steps_per_epoch=x_train1.shape[0]//BATCH_SIZE, epochs=30,
                     validation_data=val1,
                     validation_steps=x_val1.shape[0]//BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# +
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model2.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse','mape'])
# fit network
history = model2.fit(train2, steps_per_epoch=x_train2.shape[0]//BATCH_SIZE, epochs=30,
                     validation_data=val2,
                     validation_steps=x_val2.shape[0]//BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# +
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model3.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse','mape'])
# fit network
history = model3.fit(train3, steps_per_epoch=x_train3.shape[0]//BATCH_SIZE, epochs=30,
                     validation_data=val3,
                     validation_steps=x_val3.shape[0]//BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# +
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model4.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse','mape'])
# fit network
history = model4.fit(train4, steps_per_epoch=x_train4.shape[0]//BATCH_SIZE, epochs=30,
                     validation_data=val4,
                     validation_steps=x_val4.shape[0]//BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# +
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model5.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse','mape'])
# fit network
history = model5.fit(train5, steps_per_epoch=x_train5.shape[0]//BATCH_SIZE, epochs=30,
                     validation_data=val5,
                     validation_steps=x_val5.shape[0]//BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# +
#. some sample parameters for VMD
tau = 0.
# noise-tolerance (no strict fidelity enforcement)
K = 5            # 5 modes
DC = False       # no DC part imposed
init = 1         # initialize omegas uniformly
tol = 1e-6

# Walk-forward evaluation over the last 10% of the samples: decompose the
# target history with VMD, feed each mode to its dedicated model, sum the
# per-mode predictions and undo the log-difference transform.
mape = []
AVG = []    # per-sample MAPE (%)
AVG1 = []   # per-sample MAE
AVG2 = []   # per-sample squared MAE (for an RMSE-style summary)
for i in range(-int(0.1 * len(data_scaled)), 0):
    print(i)
    alpha = 2 * len(data_scaled[:i, :, 0].reshape(-1))   # moderate bandwidth constraint
    u, u_hat, omega = VMD(data_scaled[:i, :, 0].reshape(-1), alpha, tau, K, DC, init, tol)
    # split to 5 subsets: one buffer per VMD mode, exogenous features shared
    test_scaled1 = np.zeros_like(data_scaled[:i])
    test_scaled2 = np.zeros_like(data_scaled[:i])
    test_scaled3 = np.zeros_like(data_scaled[:i])
    test_scaled4 = np.zeros_like(data_scaled[:i])
    test_scaled5 = np.zeros_like(data_scaled[:i])
    mode_bufs = (test_scaled1, test_scaled2, test_scaled3, test_scaled4, test_scaled5)
    for buf, mode in zip(mode_bufs, u):
        buf[:, :, 1:] = copy.deepcopy(data_scaled[:i, :, 1:])
        buf[:, :, 0] = mode.reshape(-1, 24)
    el1, el2, el3, el4, el5 = (
        m.predict(buf[-1:, :, :])
        for m, buf in zip((model1, model2, model3, model4, model5), mode_bufs)
    )
    el_tol = el1 + el2 + el3 + el4 + el5
    # invert the log-difference: pred = exp(log(previous sample) + predicted delta)
    real_pred = np.e ** (np.log(data_orig[i - 1, :, 0].T) + el_tol)
    print(real_pred)
    print(data_orig[i, :, 0])
    abs_err = abs(real_pred.T.reshape(-1) - data_orig[i, :, 0].reshape(-1))
    AVG.append(np.sum(abs_err / data_orig[i, :, 0].reshape(-1)) * 100 / 24)
    AVG1.append(np.sum(abs_err) / 24)
    AVG2.append((np.sum(abs_err) / 24) ** 2)
    print(AVG[-1:])
    print(AVG1[-1:])
    print(AVG2[-1:])
AVG = np.array(AVG)
AVG1 = np.array(AVG1)
AVG2 = np.array(AVG2)
n_eval = len(range(-int(0.1 * len(data_scaled)), 0))
print(np.sum(AVG) / n_eval)
print(np.sum(AVG1) / n_eval)
print((np.sqrt(np.sum(AVG2) / n_eval)))

# + colab_type="code" id="qRel7k8K5Y6S" colab={}
#with VMD 5.48748199256476 1089.9457167028497 1346.15180201413
#without VMD 4.231860362987466 861.2655625059399 1095.8509395420504

# + [markdown] colab_type="text" id="qFneYveG5Y6T"
# ###### No filter

# + colab_type="code" id="mXui8vqW5Y6T" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="cdf4b00b-65d2-4fb7-943d-241d0c548344"
#for tensorflow 2.0
import datetime

# (commented-out model1 evaluation block omitted)
#
# (rest of the commented-out model1 evaluation block omitted)

def scheduler(epoch):
    """Learning-rate schedule: 0.01 for the first 5 epochs, then
    0.001 * exp(0.1 * (1 - epoch))."""
    if epoch < 5:
        return 0.01
    else:
        # FIX: np.math was removed in NumPy 2.0 -- use np.exp instead.
        return 0.001 * float(np.exp(0.1 * (1 - epoch)))

log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
#callback2 = tf.keras.callbacks.EarlyStopping(mode="min", monitor='val_loss')

# Train model1 on the unfiltered (no-VMD) series.
model1.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse', 'mape'])
# fit network
history = model1.fit(train1,
                     steps_per_epoch=x_train1.shape[0] // BATCH_SIZE,
                     epochs=30,
                     validation_data=val1,
                     validation_steps=x_val1.shape[0] // BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# + id="8sN8FSW7cfJ1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="841ccf22-4c71-48fa-880a-851de38daf4d"
#for tensorflow 2.0
import datetime

# (commented-out model2 evaluation block omitted)

def scheduler(epoch):
    """Identical schedule, re-declared so the cell stays self-contained."""
    if epoch < 5:
        return 0.01
    else:
        # FIX: np.math was removed in NumPy 2.0 -- use np.exp instead.
        return 0.001 * float(np.exp(0.1 * (1 - epoch)))

log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
#callback2 = tf.keras.callbacks.EarlyStopping(mode="min", monitor='val_loss')

model2.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse', 'mape'])
# fit network
history = model2.fit(train2,
                     steps_per_epoch=x_train2.shape[0] // BATCH_SIZE,
                     epochs=30,
                     validation_data=val2,
                     validation_steps=x_val2.shape[0] // BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# + id="s2yUqGL-dCxd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b484984c-c8c1-455d-999f-122b36853071"
#for tensorflow 2.0
import datetime

# (commented-out model3 evaluation block omitted)

def scheduler(epoch):
    """Identical schedule, re-declared so the cell stays self-contained."""
    if epoch < 5:
        return 0.01
    else:
        # FIX: np.math was removed in NumPy 2.0 -- use np.exp instead.
        return 0.001 * float(np.exp(0.1 * (1 - epoch)))

log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
#callback2 = tf.keras.callbacks.EarlyStopping(mode="min", monitor='val_loss')

model3.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse', 'mape'])
# fit network
history = model3.fit(train3,
                     steps_per_epoch=x_train3.shape[0] // BATCH_SIZE,
                     epochs=30,
                     validation_data=val3,
                     validation_steps=x_val3.shape[0] // BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# + id="p9Agd3Mldon8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="49e97b06-e881-4798-bd34-4a37a735dc6c"
#for tensorflow 2.0
import datetime

# (commented-out model4 evaluation block omitted)
#
#print(abs((real_pred.T.reshape(-1)-pretest_out[i,:,0].reshape(-1)))/pretest_out[i,:,0].reshape(-1)) # plot_val4.append([real_pred.T.reshape(-1),pretest_out4[i,:,0].reshape(-1)]) # #AVG.append(np.sum(abs((real_pred.T.reshape(-1)-test_scaler.inverse_transform(pretest_out[i,:,0:1].T).reshape(-1)))/test_scaler.inverse_transform(pretest_out[i,:,0:1].T).reshape(-1))*100/24) # #AVG1.append(np.sum(abs((real_pred.T.reshape(-1)-test_scaler.inverse_transform(pretest_out[i,:,0:1].T).reshape(-1))))/24) # #AVG2.append((np.sum(abs((real_pred.T.reshape(-1)-test_scaler.inverse_transform(pretest_out[i,:,0:1].T).reshape(-1))))/24)**2) # AVG.append(np.sum(abs((real_pred.T.reshape(-1)-pretest_out4[i,:,0].reshape(-1)))/pretest_out4[i,:,0].reshape(-1))*100/24) # AVG1.append(np.sum(abs((real_pred.T.reshape(-1)-pretest_out4[i,:,0].reshape(-1))))/24) # AVG2.append((np.sum(abs((real_pred.T.reshape(-1)-pretest_out4[i,:,0].reshape(-1))))/24)**2) # AVG = np.array(AVG) # AVG1 = np.array(AVG1) # AVG2 = np.array(AVG2) # pred=np.array(pred) # print(np.sum(AVG)/len(test_inp4)) # print(np.sum(AVG1)/len(test_inp4)) # print((np.sqrt(np.sum(AVG2)/len(test_inp4)))) def scheduler(epoch): if epoch < 5: return 0.01 else: return 0.001 * np.math.exp(0.1 * (1 - epoch)) log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir) callback = tf.keras.callbacks.LearningRateScheduler(scheduler) #callback2 = tf.keras.callbacks.EarlyStopping(mode="min", monitor='val_loss') model4.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae',metrics=['mse','mape']) # fit network history = model4.fit(train4, steps_per_epoch=x_train4.shape[0]//BATCH_SIZE, epochs=30, validation_data=val4, validation_steps=x_val4.shape[0]//BATCH_SIZE,callbacks=[tensorboard_callback, callback]) # + id="HtWV2Lp-eDPS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="5a16b569-66ab-4cb3-d2c6-d9e0e0670989" #for tensorflow 
# model5 (no-filter) training cell
import datetime

# (commented-out model5 evaluation block omitted)

def scheduler(epoch):
    """Learning-rate schedule: 0.01 for the first 5 epochs, then
    0.001 * exp(0.1 * (1 - epoch))."""
    if epoch < 5:
        return 0.01
    else:
        # FIX: np.math was removed in NumPy 2.0 -- use np.exp instead.
        return 0.001 * float(np.exp(0.1 * (1 - epoch)))

log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
#callback2 = tf.keras.callbacks.EarlyStopping(mode="min", monitor='val_loss')

model5.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae', metrics=['mse', 'mape'])
# fit network
history = model5.fit(train5,
                     steps_per_epoch=x_train5.shape[0] // BATCH_SIZE,
                     epochs=30,
                     validation_data=val5,
                     validation_steps=x_val5.shape[0] // BATCH_SIZE,
                     callbacks=[tensorboard_callback, callback])

# + id="V5aizrsLY79E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="866b9eff-c4ed-4c58-8582-567de2bb4652"
#. some sample parameters for VMD
tau = 0.         # noise-tolerance (no strict fidelity enforcement)
K = 5            # 5 modes
DC = False       # no DC part imposed
init = 1         # initialize omegas uniformly
tol = 1e-6

# Walk-forward evaluation over the last 10% of the samples.
# NOTE(review): K = 5 modes are extracted but only the first three are fed to
# model1..model3 (the mode-4..10 branches were commented out) -- confirm this
# asymmetry is intentional.
mape = []
AVG = []    # per-sample MAPE (%)
AVG1 = []   # per-sample MAE
AVG2 = []   # per-sample squared MAE (for an RMSE-style summary)
for i in range(-int(0.1 * len(data_scaled)), 0):
    print(i)
    alpha = 2 * len(data_scaled[:i, :, 0].reshape(-1))   # moderate bandwidth constraint
    u, u_hat, omega = VMD(data_scaled[:i, :, 0].reshape(-1), alpha, tau, K, DC, init, tol)
    # split to 3 subsets (buffers 4..10 were commented out and are omitted here)
    test_scaled1 = np.zeros_like(data_scaled[:i])
    test_scaled1[:, :, 1:] = copy.deepcopy(data_scaled[:i, :, 1:])
    test_scaled2 = np.zeros_like(data_scaled[:i])
    test_scaled2[:, :, 1:] = copy.deepcopy(data_scaled[:i, :, 1:])
    test_scaled3 = np.zeros_like(data_scaled[:i])
    test_scaled3[:, :, 1:] = copy.deepcopy(data_scaled[:i, :, 1:])
    test_scaled1[:, :, 0] = u[0, :].reshape(-1, 24)
    test_scaled2[:, :, 0] = u[1, :].reshape(-1, 24)
    test_scaled3[:, :, 0] = u[2, :].reshape(-1, 24)
    el1 = model1.predict(test_scaled1[-1:, :, :])
    el2 = model2.predict(test_scaled2[-1:, :, :])
    el3 = model3.predict(test_scaled3[-1:, :, :])
    el_tol = el1 + el2 + el3
    # undo the log-difference transform
    real_pred = np.log(data_orig[i - 1, :, 0].T) + el_tol
    real_pred = np.e ** real_pred
    print(real_pred)
    print(data_orig[i, :, 0])
    AVG.append(np.sum(abs((real_pred.T.reshape(-1) - data_orig[i, :, 0].reshape(-1))) / data_orig[i, :, 0].reshape(-1)) * 100 / 24)
    AVG1.append(np.sum(abs((real_pred.T.reshape(-1) - data_orig[i, :, 0].reshape(-1)))) / 24)
    AVG2.append((np.sum(abs((real_pred.T.reshape(-1) - data_orig[i, :, 0].reshape(-1)))) / 24) ** 2)
    print(AVG[-1:])
    print(AVG1[-1:])
    print(AVG2[-1:])
AVG = np.array(AVG)
AVG1 = np.array(AVG1)
AVG2 = np.array(AVG2)
print(np.sum(AVG) / len(range(-int(0.1 * len(data_scaled)), 0)))
print(np.sum(AVG1) / len(range(-int(0.1 * len(data_scaled)), 0)))
print((np.sqrt(np.sum(AVG2) / len(range(-int(0.1 * len(data_scaled)), 0)))))

# + colab_type="code" id="c7TLK3jX5Y6U" colab={}
#with VMD 5.522015617026129 1099.3017824115345 1354.1892244203907
#without VMD 4.297441530099145 877.068285431094 1108.1276932773046
VMD_Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MAT281 - Laboratorio N°03 # # # ## Problema 01 # # # <img src="https://imagenes.universia.net/gc/net/images/practicas-empleo/p/pr/pro/profesiones-con-el-avance-de-la-tecnologia.jpg" width="480" height="360" align="center"/> # # # EL conjunto de datos se denomina `ocupation.csv`, el cual contiene información de distintos usuarios (edad ,sexo, profesión, etc.). # # Lo primero es cargar el conjunto de datos y ver las primeras filas que lo componen: import pandas as pd import os # cargar datos df = pd.read_csv(os.path.join("data","ocupation.csv"), sep="|") df.head() # El objetivo es tratar de obtener la mayor información posible de este conjunto de datos. Para cumplir este objetivo debe resolver las siguientes problemáticas: # # 1.- ¿Cuál es el número de observaciones en el conjunto de datos? g = df.shape[0] print(f'Se tienen {g} observaciones.') # 2.- ¿Cuál es el número de columnas en el conjunto de datos? g = df.shape[1] print(f'Se tienen {g} columnas.') # 3.- Imprime el nombre de todas las columnas print("\ncols:") df.columns # 4.- Imprima el índice del dataframe print("\nindex:") df.index # 5.- ¿Cuál es el tipo de datos de cada columna? df.dtypes # 6.- Describir el conjunto de datos (**hint**: .describe()) df.describe(include='all') # 7.- Imprimir solo la columna de **occupation**. df['occupation'] # 8.- ¿Cuántas ocupaciones diferentes hay en este conjunto de datos? df['occupation'].nunique() # 9.- ¿Cuál es la ocupación más frecuente? df['occupation'].value_counts().idxmax() # 10.- ¿Cuál es la edad media de los usuarios? mean_df = df['age'].mean() print(mean_df) # 11.- ¿Cuál es la edad con menos ocurrencia? 
df['age'].value_counts().idxmin()

# 12.- Encontrar la edad promedio según la variable **occupation**

# FIX: the question asks for the mean age per occupation; selecting the column
# first answers it directly (and avoids aggregating non-numeric columns).
df.groupby('occupation')['age'].mean()

# ## Problema 02
#
#
# <img src="https://image.freepik.com/vector-gratis/varios-automoviles-dibujos-animados_23-2147613095.jpg" width="360" height="360" align="center"/>
#
#
# EL conjunto de datos se denomina `Automobile_data.csv`, el cual contiene información tal como: compañia, precio, kilometraje, etc.
#
# Lo primero es cargar el conjunto de datos y ver las primeras filas que lo componen:

# cargar datos
df = pd.read_csv(os.path.join("data", "Automobile_data.csv")).set_index('index')
df.head()

# El objetivo es tratar de obtener la mayor información posible de este conjunto de datos. Para cumplir este objetivo debe resolver las siguientes problemáticas:
#
# 1. Elimine los valores nulos (Nan)

# dropna() keeps exactly the rows where every column is non-null -- same
# result as the previous notnull().all(axis=1) mask, but idiomatic.
df = df.dropna()
df.head()

# 2. Encuentra el nombre de la compañía de automóviles más cara

df.loc[df['price'].idxmax(), 'company']

# 3. Imprimir todos los detalles de Toyota Cars

# FIX: the original cell showed describe() for every company; the question
# asks only for the Toyota rows. (Assumes company names are lowercase in this
# dataset, e.g. 'toyota' -- confirm with df['company'].unique().)
df[df['company'] == 'toyota']

# 4. Cuente el total de automóviles por compañía

df.groupby('company')['body-style'].count()

# 5. Encuentra el coche con el precio más alto por compañía

# Selecting the column before aggregating avoids the pandas >= 2.0 TypeError
# raised by frame-wide max()/mean() over non-numeric columns.
df.groupby('company')['price'].max()

# 6. Encuentre el kilometraje promedio (**average-mileage**) de cada compañía automotriz

df.groupby('company')['average-mileage'].mean()

# 7. Ordenar todos los autos por columna de precio (**price**)

by_year = df.sort_values('price', ascending=False)  # ordenado de mayor a menor
by_year

# ## Problema 03
#
#
# Siguiendo la temática de los automóviles, resuelva los siguientes problemas:
#
# ### a) Subproblema 01
# A partir de los siguientes diccionarios:

GermanCars = {'Company': ['Ford', 'Mercedes', 'BMV', 'Audi'], 'Price': [23845, 171995, 135925, 71400]}
japaneseCars = {'Company': ['Toyota', 'Honda', 'Nissan', 'Mitsubishi '], 'Price': [29995, 23600, 61500, 58900]}

# * Cree dos dataframes (**carsDf1** y **carsDf2**) según corresponda.
# * Concatene ambos dataframes (**carsDf**) y añada una llave ["Germany", "Japan"], según corresponda.

# dataframe with GermanCars
# FIX: build straight from the declared dict instead of repeating the literal.
carsDf1 = pd.DataFrame(GermanCars)
carsDf1

# dataframe with japaneseCars
carsDf2 = pd.DataFrame(japaneseCars)
carsDf2

carsDf = pd.concat([carsDf1, carsDf2], keys=["Germany", "Japan"])
carsDf

# ### b) Subproblema 02
#
# A partir de los siguientes diccionarios:

Car_Price = {'Company': ['Toyota', 'Honda', 'BMV', 'Audi'], 'Price': [23845, 17995, 135925, 71400]}
car_Horsepower = {'Company': ['Toyota', 'Honda', 'BMV', 'Audi'], 'horsepower': [141, 80, 182, 160]}

# * Cree dos dataframes (**carsDf1** y **carsDf2**) según corresponda.
# * Junte ambos dataframes (**carsDf**) por la llave **Company**.

carsDf1 = pd.DataFrame(Car_Price)
carsDf1

carsDf2 = pd.DataFrame(car_Horsepower)
carsDf2

# +
carsDf = pd.merge(carsDf1, carsDf2, on='Company')
carsDf
# -
labs/lab_03.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="OmyeuoMHhHMi" outputId="1d37ecdb-0da7-414a-d0fe-c4d9f655a57b" pip install tifffile # + id="KEQ-MA7WaVa3" import cv2 import numpy as np import matplotlib.pyplot as plt import os from tifffile import imread from PIL import Image from google.colab.patches import cv2_imshow import random import torch from torch.utils.data import DataLoader, Dataset from torch import nn from tqdm import tqdm from torch.utils.tensorboard import SummaryWriter from google.colab.patches import cv2_imshow # + id="BsSAqlw12C69" os.environ['CUDA_LAUNCH_BLOCKING'] = "1" device = "cuda" # + colab={"base_uri": "https://localhost:8080/"} id="My86lzKmEYel" outputId="a317ab08-7962-4a7f-da84-72c90366ce12" from google.colab import drive drive.mount('/content/drive') # + id="Ydq1r0CQbxEN" data_path = "/content/drive/MyDrive/Sentinel/" checkpoint_path = '/content/drive/MyDrive/Satellite_image_segmenattion/Checkpoints/unet_rgb_segmentation.pth.tar' results_image_path = "/content/drive/MyDrive/Satellite_image_segmenattion/saved_image/" images_path = data_path + "images" labels_path = data_path + "labels" images_id = [f.name for f in os.scandir(images_path)] # + [markdown] id="ZRtR0HOsdoYG" # ## **Visualization of a RGB Image along with its corresponding mask.** # + id="BdDAVRZnhg5Z" def read_image(images_path,labels_path,id): image = os.path.join(images_path,images_id[id],"07.tif") label = os.path.join(labels_path,images_id[id],"dlt.tif") img = imread(image) label = imread(label) return img,label def image_preprocessing(img): rgb_image = img[:,:,[2,1,0]] rgb_image = np.int16(rgb_image) rgb_image = np.clip(rgb_image,0,1500) rgb_image = rgb_image/1500 * 255 rgb_image = np.float32(rgb_image) return rgb_image def labelVisualize(num_class, color_dict, img): 
img = img[:,:,0] if len(img.shape) == 3 else img img_out = np.zeros((64, 64, 3)) for i in range(num_class): img_out[img == i,:] = np.asarray(color_dict[i]) return img_out color_dict = { 0: [255, 0, 0], 1: [0, 255, 0], 2: [0, 0, 255], } # + id="8sgphdfPTF2m" # visualization on 1 sample image image,label = read_image(images_path,labels_path,id=20) num_classes = len(np.unique(label)) # No of classes for segmentation rgb_image = image_preprocessing(image) # visualization on 2 sample image image1,label1 = read_image(images_path,labels_path,id=5) rgb_image_01 = image_preprocessing(image1) # + id="Jv0bpqJJtNSd" colab={"base_uri": "https://localhost:8080/", "height": 362} outputId="b2d07f6a-721d-413a-be24-63397adfa6f6" label = labelVisualize(num_classes,color_dict,label) label_01 = labelVisualize(num_classes,color_dict,label1) print("Image") cv2_imshow(rgb_image) print("Label") cv2_imshow(label) print("----------------------") print("Image") cv2_imshow(rgb_image_01) print("Label") cv2_imshow(label_01) # + [markdown] id="1bjls-VHeNRY" # **Next shuffle and split the images in train(70%), val(20%), test(10%). 
# I have stored the ids of the images in 3 separate files.**

# + id="LgwAs6RyF3L7" colab={"base_uri": "https://localhost:8080/"} outputId="9d24be82-c0b8-48c6-eae3-a7ad3e90ed18"
# Create (or reuse) the persisted train/val/test split files so that
# re-running the notebook keeps the same split.
# NOTE(review): random.shuffle only runs when train.txt is missing; if only
# val.txt/test.txt are missing the ids are taken unshuffled — confirm intent.
if os.path.exists('/content/drive/MyDrive/Sentinel/train.txt'):
    print("Train split file already exist")
else:
    random.shuffle(images_id)
    train_image_id = images_id[0:3960]
    with open("/content/drive/MyDrive/Sentinel/train.txt", "w") as f:
        for train_item in train_image_id:
            f.write("%s\n" % train_item)

if os.path.exists('/content/drive/MyDrive/Sentinel/val.txt'):
    print("Val split file already exist")
else:
    val_image_id = images_id[3960:5052]
    with open("/content/drive/MyDrive/Sentinel/val.txt", "w") as f:
        for val_item in val_image_id:
            f.write("%s\n" % val_item)

if os.path.exists('/content/drive/MyDrive/Sentinel/test.txt'):
    print("Test split file already exist")
else:
    test_image_id = images_id[5052:]
    with open("/content/drive/MyDrive/Sentinel/test.txt", "w") as f:
        for test_item in test_image_id:
            f.write("%s\n" % test_item)

# Read the split ids back so the split is identical across runs.
with open("/content/drive/MyDrive/Sentinel/train.txt", "r") as f:
    train_idx = [idx.rstrip() for idx in f]
with open("/content/drive/MyDrive/Sentinel/val.txt", "r") as f:
    val_idx = [idx.rstrip() for idx in f]
with open("/content/drive/MyDrive/Sentinel/test.txt", "r") as f:
    test_idx = [idx.rstrip() for idx in f]


# + id="OBDd6_qzbT4K"
class Segmentation_Dataset(Dataset):
    """Image segmentation dataset.

    Each sample id is a directory containing the Sentinel band image
    ("07.tif") and the segmentation label ("dlt.tif").
    """

    def __init__(self, data_path, images_ids):
        # data_path is expected to end with a path separator, since the
        # sub-folder names are appended by plain string concatenation.
        self.images_path = data_path + "images"
        self.labels_path = data_path + "labels"
        self.images_ids = images_ids

    def __len__(self):
        return len(self.images_ids)

    def __getitem__(self, idx):
        image = os.path.join(self.images_path, self.images_ids[idx], "07.tif")
        label = os.path.join(self.labels_path, self.images_ids[idx], "dlt.tif")
        image = imread(image)
        label = imread(label)
        # 255 marks unlabeled pixels; fold them into class 0.
        label[label == 255] = 0
        # Data preprocessing: reorder bands to RGB, then transpose to
        # channels-first. NOTE(review): np.transpose with no axes reverses
        # ALL axes, i.e. (H, W, 3) -> (3, W, H); harmless for square tiles
        # but worth confirming.
        rgb_image = np.transpose(image[:, :, [2, 1, 0]])
        rgb_image = np.int16(rgb_image)
        # Clip reflectance to [0, 2000] and rescale to [0, 255].
        rgb_image = np.clip(rgb_image, 0, 2000)
        rgb_image = rgb_image / 2000 * 255
        rgb_image = np.float32(rgb_image)
        rgb_image = torch.tensor(rgb_image)
        label = torch.tensor(label)
        return rgb_image, label


# + id="MSbyswh93NZr"
class UNET(nn.Module):
    """Small 3-level U-Net: 3 contracting blocks, 3 expanding blocks
    with skip connections from the encoder to the decoder."""

    def __init__(self, in_channels, out_channels):
        super().__init__()

        # Contracting (encoder) path.
        self.conv1 = self.contract_block(in_channels, 32, 7, 3)
        self.conv2 = self.contract_block(32, 64, 3, 1)
        self.conv3 = self.contract_block(64, 128, 3, 1)

        # Expanding (decoder) path; *2 input channels account for the
        # concatenated skip connections.
        self.upconv3 = self.expand_block(128, 64, 3, 1)
        self.upconv2 = self.expand_block(64 * 2, 32, 3, 1)
        self.upconv1 = self.expand_block(32 * 2, out_channels, 3, 1)

    # FIX: define forward() instead of overriding __call__; overriding
    # __call__ bypasses nn.Module's hook machinery. model(x) behaves the
    # same, dispatched through nn.Module.__call__ -> forward.
    def forward(self, x):
        # Downsampling part.
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)

        # Upsampling part with skip connections (channel-wise concat).
        upconv3 = self.upconv3(conv3)
        upconv2 = self.upconv2(torch.cat([upconv3, conv2], 1))
        upconv1 = self.upconv1(torch.cat([upconv2, conv1], 1))
        return upconv1

    def contract_block(self, in_channels, out_channels, kernel_size, padding):
        """Two conv+BN+ReLU layers followed by a stride-2 max pool."""
        contract = nn.Sequential(
            torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        return contract

    def expand_block(self, in_channels, out_channels, kernel_size, padding):
        """Two conv+BN+ReLU layers followed by a stride-2 transposed conv
        that doubles the spatial resolution."""
        expand = nn.Sequential(
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1, padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
        )
        return expand


# + id="fyiT9rSB41RD"
unet = UNET(3, num_classes).cuda()

# + id="7Oq0kkFO5Qc-"
train_dataset = Segmentation_Dataset(data_path, train_idx)
val_dataset = Segmentation_Dataset(data_path, val_idx)
test_dataset = Segmentation_Dataset(data_path, test_idx)

train_dl = DataLoader(train_dataset, batch_size=16, shuffle=True, drop_last=True)
val_dl = DataLoader(val_dataset, batch_size=16, shuffle=True, drop_last=True)
test_dl = DataLoader(test_dataset, batch_size=16, shuffle=True, drop_last=True)

loss_function = nn.CrossEntropyLoss()


# + id="veVslC536HF7"
def train(model, dataloader, epochs, optimizer, loss_fn, device="cuda"):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    train_running_loss = 0.0
    for iter, (image, label) in enumerate(dataloader):
        image = image.to(device)
        label = label.to(device)

        optimizer.zero_grad()
        output = model(image)
        # FIX: use the criterion passed as `loss_fn`; the original ignored
        # the parameter and silently used the global `loss_function`.
        loss = loss_fn(output, label.long())

        if iter % 100 == 0:
            print("Train epochs: "+str(epochs)+" Iteration: "+str(iter)+" loss: "+str(loss.item()))

        train_running_loss += loss.item()
        loss.backward()
        optimizer.step()
    return train_running_loss / len(dataloader)


def val(model, dataloader, epochs, loss_fn, device="cuda"):
    """Run one validation epoch (no gradients) and return the mean per-batch loss."""
    model.eval()
    val_running_loss = 0.0
    # FIX: disable autograd bookkeeping during validation; outputs are
    # unchanged but memory use drops.
    with torch.no_grad():
        for iter, (image, label) in enumerate(dataloader):
            image = image.to(device)
            label = label.to(device)

            output = model(image)
            # FIX: use the passed criterion (see train()).
            loss = loss_fn(output, label.long())

            if iter % 100 == 0:
                print("Val epochs: "+str(epochs)+" Iteration: "+str(iter)+" loss: "+str(loss.item()))

            val_running_loss += loss.item()
    return val_running_loss / len(dataloader)


# + [markdown] id="590TV5HHXWBU"
# **Due to limited resources, I have trained the model for 10 epochs.
# For better results we need to train the network for more epochs.**

# + id="yjCcViMRLidK" colab={"base_uri": "https://localhost:8080/"} outputId="1ed89cbf-d810-41b5-95f9-6ffee3918682"
# Training loop: checkpoint the model whenever validation loss improves.
num_of_epochs = 10
optimizer = torch.optim.Adam(unet.parameters(), lr=0.01)
min_loss = np.inf
for epoch in tqdm(range(num_of_epochs)):
    # FIX: the original passed the undefined name `loss_fn` here, which
    # raises NameError; the criterion defined above is `loss_function`.
    train_loss = train(unet, train_dl, epoch, optimizer, loss_function, device="cuda")
    val_loss = val(unet, val_dl, epoch, loss_function, device="cuda")
    # writer.add_scalar("Loss/train", train_loss, epoch)
    # writer.add_scalar("Loss/val", val_loss, epoch)
    if val_loss < min_loss:
        min_loss = val_loss
        checkpoint = {
            "model": unet.state_dict(),
            "Val Loss": val_loss,
        }
        torch.save(checkpoint, checkpoint_path)

# + [markdown] id="ML3HF7nUrpSK"
# ### **Inference on RGB Image**

# + id="V-OhMYLrK-pV"
# Load RGB model
def Load_model(model_dir):
    """Restore the best checkpoint into the global `unet` and return it in eval mode."""
    checkpoint = torch.load(model_dir)
    model = unet
    model.load_state_dict(checkpoint['model'])
    model = model.to(device="cuda")
    model.eval()
    return model


# + id="pyBjW9m1vMpZ"
def one_hot_encoding(label):
    """One-hot encode an integer label map over the fixed class ids {0, 1, 2}.

    Returns a boolean array with one trailing channel per class.
    """
    labels = [0, 1, 2]
    semantics = []
    for colour in labels:
        equality = np.equal(label, colour)
        semantics.append(equality)
    semantics = np.stack(semantics, axis=-1)
    return semantics


def accuracy(outputs, labels):
    """Pixel accuracy over one-hot tensors: (TP + TN) / total elements."""
    tp = torch.sum(torch.logical_and(labels, outputs))
    tn = torch.numel(outputs) - torch.sum(torch.logical_or(labels, outputs))
    acc = (tp + tn) / torch.numel(outputs)
    return acc


def iou_matrix(outputs, labels):
    """Intersection-over-union over one-hot tensors, smoothed to avoid 0/0."""
    intersection = torch.logical_and(labels, outputs)
    union = torch.logical_or(labels, outputs)
    iou = (torch.sum(intersection) + SMOOTH) / (torch.sum(union) + SMOOTH)
    return iou


# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="pXRckjn-sDUx" outputId="87e8145a-96bf-49ce-9fa6-bb2e167b4eac"
running_acc = 0.0
running_iou = 0.0
SMOOTH = 1e-6

# Load the model for inference
model = Load_model(checkpoint_path)

# Iteration over the test images.
# NOTE(review): `device` is assumed to be defined earlier in the notebook
# (presumably "cuda") — confirm.
for idx, item in enumerate(test_dl):
    image = item[0].to(device)
    label = item[1]
    one_hot_label = one_hot_encoding(label.numpy())
    label = label.to(device)

    output = model(image)
    output = torch.argmax(output, dim=1)

    output_for_metrics = torch.Tensor(one_hot_encoding(output.detach().cpu().numpy())).int()
    label_for_metrics = torch.tensor(one_hot_label)

    # Accuracy metric.
    acc = accuracy(output_for_metrics, label_for_metrics)
    running_acc += acc
    # IoU metric.
    iou = iou_matrix(output_for_metrics, label_for_metrics)
    running_iou += iou

    # Visualization of every 5th image from the test sample.
    if idx % 5 == 0:
        fig = plt.figure(figsize=(10, 7))
        image = torch.transpose(image[0], 0, 2).cpu().numpy()
        output = output[0].detach().cpu().numpy()
        output = labelVisualize(3, color_dict, output)
        label = label[0].cpu().numpy()
        label = labelVisualize(3, color_dict, label)
        cv2.imwrite(results_image_path+'res'+str(idx)+'_image.png', image)
        cv2.imwrite(results_image_path+'res'+str(idx)+'_label.png', label)
        cv2.imwrite(results_image_path+'res'+str(idx)+'_output.png', output)
        print("Image")
        cv2_imshow(image)
        print("Label")
        cv2_imshow(label)
        print("Network Output")
        cv2_imshow(output)
        print("---------------------------")

    if idx == 100:  # To visualize the results on the first few images from the test dataset
        break

# NOTE(review): the loop breaks after 101 batches but the means divide by
# len(test_dl), so when the break triggers the reported metrics are
# under-scaled — confirm whether the divisor should be (idx + 1).
mean_acc = running_acc / len(test_dl)  # Mean accuracy
mean_iou = running_iou / len(test_dl)  # Mean IOU
# FIX: corrected the "Accutacy" typo in the printed summary.
print("Mean Average Accuracy: "+str(mean_acc)+" Mean IOU: "+str(mean_iou))

# + id="JTQFgaD12vnN"


# + id="Ut5JXoK_vmDP"
satellite_image_segmentation_training_inference_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="lQj5MQxeuaff" # # UX 3 Descriptive Statistics # # Author: # - | # <NAME>, <EMAIL>\ # Dept. Architecture, Design and Media Technology, Aalborg University Copenhagen # --- # ## Learning goals # After working your way through this notebook you should be able to: # - Plot and summarise numerical data by showing # + Central Tendency # + Variability # - Describe the distribution of data using histograms # - Explain the relationship between standard error of mean and sample size # # ## Table of Contents # # - [2. Summarizing data](#Summarizing) # + [2.1. Central Tendency](#Tendency) # + [2.2. Variability](#variability) # - [3. Frequency Distributions](#Distributions) # + [3.1. Normal Distribution](#Normaldist) # - [4. Sample means and sample size](#SampleMeanSize) # - [5. Standard Error of the Mean](#StandardError) # + executionInfo={"elapsed": 2456, "status": "ok", "timestamp": 1620288759987, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="SvwZEbtmuafl" # %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.stats as stats # + [markdown] id="llNylTZkuafo" # <a id= 'Summarizing'> </a> # + [markdown] id="6OOyBpFMuafp" # # ## 1 Summarizing data # # Once we have our sample, we first want to collect, classify, summarize and present the data so we can interpret it. This is what we use descriptive statistics for. Ways of summarizing data differ depending on the type and spread of collected data. # # + [markdown] id="llNylTZkuafo" # <a id= 'Tendency'> </a> # - # ### 1.1 Central tendency # # The central tendency is a single value that summarizes the data. If we are to pick one value to represent all measured values this is it. 
# # #### Arithmetic and Geometric Mean # The most commonly used measure of central tendency is the *ARITHMETIC MEAN* value, which is based on ALL values (or scores). One can think of it as a kind of "balancing point". The arithmetic mean of a sample can be calculated as # # $$\bar{x} =\frac {\sum_i^n{ x_i}} {N} $$ # # where $x_i$ is each individual score (that is $x_1, x_2, ..., x_n$) # # Since all values are weighted equally, any extreme values become quite influential. # # An alternative is to use the *GEOMETRIC MEAN*, calculated as the nth root of the product between individual scores: # # $$ \sqrt[n]{{ x_1}{ x_2}...{x_n}}$$ # # #### Median # The *MEDIAN* is also less sensitive to extreme values than the arithmetic mean. The median is the point below which half of the (numerically ordered) scores fall and is also called the 50th percentile. A downside with this is that while the median takes the middle value, it does not incorporate *all* values in doing so. # + [markdown] id="ydUOPdPSuafp" # ### Try out 1: # First, calculate different measures of central tendency for # A=[1, 1, 2, 3, 3, 3, 3, 4] # # Then, change A to include an extreme value and redo. # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1016, "status": "ok", "timestamp": 1620289256839, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="ONqB0Pq4uafp" outputId="c31296bd-4123-433f-9c96-36e44c46913c" ## Uncomment and run code for different measures of central tendency A=[1, 1, 2, 3, 3, 3, 3, 4] ## Uncomment next line to display MEDIAN # print('Median:') # print(np.median(A)) ## Uncomment next line to display arithmetic MEAN # print('Arithmetic mean:') # np.mean(A) ## Uncomment next line to display geometric MEAN # print('Geometric mean:') # stats.gmean(A) # - # ANSWER: How do the three measures of central tendency change when A changes? 
# # + [markdown] id="Lh43U8F_uafp" # #### Mode # # The *mode* is simply the most commonly occuring score in the data, and therefore a score always present in the data. This is used mostly for categorical (nominal) data. # # ### Try out 2: # what will be the answer for this call? # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 711, "status": "ok", "timestamp": 1620289419430, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="xg15mNE6uafq" outputId="f6c21935-dfad-4e08-c523-b53fee2b9e84" ## TRY OUT BY HAND FIRST: What will be the mode of the the following sample? # ["red", "blue", "blue", "red", "green", "red", "red"] ## Uncomment next line to display MODE # stats.mode(["red", "blue", "blue", "red", "green", "red", "red"]) # + [markdown] id="Z9YlSekluafq" # <a id= 'variability'> </a> # + [markdown] id="EIsqv_Psuafq" # ### 1.2. Variability # # Reporting only central tendency is a start, but it typically does not tell us much about the true # nature of the data, because it lacks information about *variability* (i.e. spread). The simplest way of reporting this variability is to specify the range of the data, i.e. the smallest and largest values. While this gives us some information, two variability measures that are more used are **variance** and **standard deviation**. These help us understand how closely to the central tendency values are located. # # VARIANCE is the sum of the **squared difference** between *each individual # score* and the *mean*, all divided by the *number* of scores # # $$\sigma^2 =\frac{\sum_i^N{ (x_i-\bar{x}})^2 } {N}$$ # # where $x_i$ is each # individual value and $\bar{x}$ is the # arithmetic mean. # The squared difference ("sum of squares") will re-appear in some of the statistical tests later on. 
# # STANDARD DEVIATION is simply the square root of the variance
# $$\sigma =\sqrt {\sigma^2}$$
#
# The standard deviation can be thought of as the average distance between all individual values and the sample mean. Assuming that data is normally distributed, most of the scores can be found within this distance (68.27% of them to be more precise).
#
# What you see above refers to the true variance and the true standard deviation of the population. For SAMPLE variance and standard deviation, the denominator is adjusted by replacing *N* by *(N-1)*. You can think of it as reducing the risk of underestimating the variance for small sample sizes (because the denominator N-1 is smaller than N, resulting in a higher calculated variance).
# + [markdown] id="x7iVYogEuafr"
# ### Try out 3:
# Which of the following function calls calculate the sample standard deviation?
#
# np.std(a)
# np.std(a, ddof=1)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 691, "status": "ok", "timestamp": 1620290437008, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="O9L9xubouafr" outputId="93d5ef57-bdf7-46af-a62b-76bedcf3229a"
# Which of the following function calls are calculating the sample standard deviation?
# 'ddof' means Delta Degrees of Freedom.
# The divisor used in calculations is N - ddof, where N represents the number of elements.
# By default ddof is zero.
a=np.arange(1,4)
# Compare the following:
print(np.std(a))
print(np.std(a, ddof=1))
# TIP look up ddof in: help(np.std)
There are a few shapes that can be expected to occur in different kinds of data: # - *NORMAL* distributions (bell shaped and symmetrical), # - *SKEWED* distributions (one tail longer than the other), # - *UNIFORM* distributions (flat), # - and *BIMODAL* (two main peaks). # + [markdown] id="PeNcx4R5uaft" # ### Try out 4: # What kind of distribution is this? Plot the histogram for different increasing N and try to determine from the shape what type of distribution that emerges (normal, skewed, uniform or bimodal?). # + colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"elapsed": 792, "status": "ok", "timestamp": 1620290681990, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="NyFKv944uaft" outputId="e3d9840d-93ec-4709-8a51-c2e345c4385a" # Try increasing the N to draw a larger sample # Increase it by factors of 10 (4 - 40 - 400 - 4000 - 40000...) to see how the shape changes N=4 scores=np.random.randint(1,6,N) plt.hist(scores) plt.show() # TIP: for more info, uncomment the next line and run # help(np.random) # + [markdown] id="gxC1saKouafs" # <a id= 'Normaldist'> </a> # + [markdown] id="F3BNEZVJuaft" # ### 2.1 Normal distribution # # # The *Gaussian* distribution is so common that it is also called a *normal* distribution. A lot of different biological processes give rise to data distributed in this fashion (e.g. heights, weights, bacteria growth...) # # The distribution is defined by the function: # # $$ y(x) =\frac{1}{\sigma \sqrt{2 \pi}} e^{-\frac{(x-\mu)^2}{2\sigma^2}}$$ # # where $\sigma$ is the standard devaiation and $\mu$ is the # arithmetic mean. Since normal distributions are perfectly symmetrical, the mean, median and mode are the same. Note that we here use *Greek* letters to denote the *TRUE* mean ($\mu$) and standard deviation ($\sigma$) of the population (as opposed to the *SAMPLE* mean $\bar{x}$ and SAMPLE standard deviation $s$). 
# + id="jVLpRX9_uaft" outputId="6d3f3d8f-4cdf-4c2e-aa81-95174664b56f"
# Plot a standard normal distribution where:
# Standard deviation (sigma) = 1
# Mean (mu) = 0

# Generate an x-vector
x = np.arange(-4, 4, 0.1)

sigma1 = 1
mu1 = 0

# Calculate a normal distribution of x with a mean of 0
y1 =1/(sigma1* np.sqrt(2*np.pi))*np.exp(-((x-mu1)**2)/(2 * sigma1**2))
plt.plot(x,y1)

# Plot the mean value as vertical line
plt.axvline(mu1, 0, 1)
plt.show()

# + [markdown] id="K3pUBK4Ruafu"
# ### Try out 5:
# Plot the normal distribution with different mean and std. What changes in the plot?

# + id="f_C0rKZ-uafu"
# Copy and modify the code above to plot the normal distribution again
# But now with different values for mean and std.
# What changes in the plot?

# + [markdown] id="0cmsr9scuafu"
# In a normal distribution, the majority of scores lie around the centre (main peak) of the distribution. The width of the bell says something about the variability of data, specifically how well-centered it is around the mean value. In fact, as seen in the equation above, the width is linked to the standard deviation. 68.27% of all values can be found in the region $\mu \pm \sigma$. If we increase the range to $\mu \pm 2\sigma$, we account for almost ALL values, 95.45%.
#
# An example of normally distributed data can be seen in human height:
# https://ourworldindata.org/human-height#height-is-normally-distributed
#

# + [markdown] id="HMm_W3iSuafu"
# We can simulate drawing a sample measuring heights
#
# ### Try out 6:
# Look at the code and histogram in the below code. What is the mean and the standard deviation for this distribution?
# + colab={"base_uri": "https://localhost:8080/", "height": 290} executionInfo={"elapsed": 1156, "status": "ok", "timestamp": 1620291443003, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="9kmS6l9Fuafv" outputId="04f594d3-33ae-4b3a-80b7-8f5385705327" # What is the mean and the standard deviation for this distribution # Sample size N = 50 # Sample values sampleheights = np.random.normal(164,7,N) plt.hist(sampleheights) plt.ylabel('occurrences') plt.xlabel('height (cm)') plt.show() # + [markdown] id="h9kYXZoouafv" # ### Try out 7: # Go to the link on human heights https://ourworldindata.org/human-height#height-is-normally-distributed and find the mean and standard deviation values for of the opposite sex. # Copy the code above and insert these values so that they correspond to samples of the opposite sex. # + id="kHGI980yuafv" # Copy the random sampling code above and insert the values from human height data # so that they correspond to samples of the opposite sex # + [markdown] id="aptYtx83uafv" # <a id= 'SampleMeanSize'> </a> # + [markdown] id="Oawf5_4euafv" # ## 3. Sample mean and sample size # # We draw samples because it is impractical (or impossible) to measure all individuals in the whole population. We therefore need to consider the sample size needed for our sample mean ($\bar{x}$) to be a good estimate of the true population mean ($\mu$). # As our sample size N increases it will start to approach the whole population, so the immediate answer is the larger the better. The reason for this we can see in the variability of sample means. Practically, however, larger samples cost resources and we will later return to what determines a reasonable N. # # We can investigate how the sample means vary in relation to the true population mean $\mu$ by simulating samples from a normally distributed population. 
By repeatedly drawing means of a sample size N, calculating their respective sample means, and plotting these, we can see how they differ. # + [markdown] id="Ivvf_c6buafw" # ### Try out 8: # Draw different samples, each with a sample size N. Then extend the code to add means for all samples and plot the histogram of the resulting scores. # # Compare to the true mean of the population (the one we send to the random-function). # # Repeat for different Ns. # + executionInfo={"elapsed": 657, "status": "ok", "timestamp": 1620291749368, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="ANrZ3YuVuafw" # Draw different samples, each with a sample size N. # Sample size N=5 # Mean of the population mean_pop = 164 # Standard deviation of the population std_pop = 7 # Randomly generate 5 sample from the same population sample1=np.random.normal(mean_pop, std_pop, N) sample2=np.random.normal(mean_pop, std_pop, N) sample3=np.random.normal(mean_pop, std_pop, N) sample4=np.random.normal(mean_pop, std_pop, N) sample5=np.random.normal(mean_pop, std_pop, N) # Extend this to add means for all samples allmeans= [np.mean(sample1), np.mean(sample2)] # Add code to plot the histogram of the resulting scores. # Compare to the true mean of the population # Repeat for different Ns # + [markdown] id="gKPgVCtpuafw" # <a id= 'StandardError'> </a> # + [markdown] id="jU6LQ3ZLuafw" # ## 4. Standard Error of the Mean # # The estimate of the true population mean vary among the samples drawn from the population. As we collect more and more samples it becomes clear that **the sample means *themselves* form a distribution with some variability**. If the sample size N is large enough, most sample means will be close to the true population mean (assuming that the standard deviation of the sample means distribution is small). 
# # The standard deviation of the sample means distribution is called *standard error of the mean* and it serves as a measure of how precise our estimation of the true mean is. # # The effect of sample size on standard error of the mean can be seen by drawing K samples, each of size N, calculating sample means and plot them. # + id="cSPSVlMguafx" outputId="13f31763-58fe-4a82-a514-396756a5204f" # The effect of sample size on standard error of the mean can be seen # by drawing K samples, each of size N, calculating sample means and plotting them. # K is the number of samples we draw K = 100 # N contains the sample sizes we are comparing N = [10, 20, 30] # Initialize an array for the sample means meanarr = np.zeros(K, float) for i in range(0,len(N)): # For each sample size... for j in range(0,K): # And for each individual sample of that size... score=np.random.normal(200,20,N[i]) # Generate normally-distributed sample vector meanarr[j] = score.mean() # Calculate the sample mean count, bins, ignored = plt.hist(meanarr, density=True) # Plot histogram with K means (corresponding to one sample size) print('Standard error =', meanarr.std()) # Print the standard error for each sample size plt.ylabel('Occurrences') plt.xlabel('Average Score') plt.show() # The plot shows that the larger samples have means that are closer to the true population mean # Hence they have a higher peak, with lower standard deviation (bell width) # - # # For most purposes, however, we approximate the standard error $\sigma_{\bar{x}}$ by using the sample standard deviation $s$ and sample size $N$. # # # $$ \sigma_{\bar{x}} =\frac{s}{\sqrt{N}}$$ # + [markdown] id="v5aamg9juafx" # ### Try out 9: # Change the number of samples drawn (K) in the code above and see how the distribution changes. 
# + id="yyQi_bsAuafx" # Copy the code above but change the number of samples drawn K and see how the distribution changes # + [markdown] id="_Roq1r_xuafx" # ### Try out 10: # Add additional sample sizes in the array for N and see the effect of the standard error. # + id="HMCftLb6uafx" # Copy the code above but add additional sample sizes for N and see the effect of the standard error # - # ANSWER: How does the standard error of the mean change with larger N and K? How does this relate to the histogram of the means?
UX3_Descriptive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns get_ipython().run_line_magic('matplotlib','inline') plt.style.use('seaborn-whitegrid') dataset=pd.read_csv("diabetes.csv") dataset data=dataset[['Pregnancies','BloodPressure']] data.head() x=data['Pregnancies'] y=data['BloodPressure'] sns.lineplot(x,y,dashes=True) plt.show() from scipy.stats import norm correlation=data.corr() print(correlation) sns.heatmap(correlation,cmap='BrBG') plt.show() covar=data.cov() print(covar) sns.heatmap(covar) plt.show() # + #Normalization # - dataset dataset.shape Age=np.array(dataset['Age']) Age Age=np.array(dataset['Age']) print("max Age",max(Age)) Age=Age.reshape(768,1) Age=np.array(dataset['Age']) print("max Age",max(Age)) Age=Age.reshape(768,1) from scipy import stats zscore=np.array(stats.zscore(Age)) zscore=zscore[0:394] zscore=zscore.reshape(2,197) zscore # + #Decimal Normalization # - dn=[] dn.append(Age/pow(10,2) ) dn=np.array(dn) dn # + #min-max normalization # - from sklearn.preprocessing import MinMaxScaler scaler=MinMaxScaler() Age=np.array(dataset['Age']) Age=Age.reshape(-1, 1) MinMax = scaler.fit(Age) MinMax scaler.transform(Age)
18cse102-Assignment5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Train a character-level GPT on some text data
#
# The inputs here are simple text files, which we chop up to individual characters and then train GPT on. So you could say this is a char-transformer instead of a char-rnn. Doesn't quite roll off the tongue as well. In this example we will feed it some shakespear, which we'll get it to predict character-level.

# set up logging
import logging
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# make deterministic
from mingpt.utils import set_seed
set_seed(42)

import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F

# +
import math
from torch.utils.data import Dataset

class CharDataset(Dataset):
    """Dataset of fixed-length character windows drawn from one text string.

    Each item is (x, y) where y is x shifted one character ahead — the
    next-character prediction targets.
    """

    def __init__(self, data, block_size):
        # data: the full training corpus as a single Python string.
        chars = list(set(data))
        data_size, vocab_size = len(data), len(chars)
        print('data has %d characters, %d unique.' % (data_size, vocab_size))

        # Bidirectional character <-> integer-id maps.
        self.stoi = { ch:i for i,ch in enumerate(chars) }
        self.itos = { i:ch for i,ch in enumerate(chars) }
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.data = data

    def __len__(self):
        # Nominal number of (block_size + 1)-sized chunks in the text;
        # __getitem__ ignores idx and samples a random window anyway.
        return math.ceil(len(self.data) / (self.block_size + 1))

    def __getitem__(self, idx):
        # we're actually going to "cheat" and pick a spot in the dataset at random
        i = np.random.randint(0, len(self.data) - (self.block_size + 1))
        chunk = self.data[i:i+self.block_size+1]
        # Encode characters to integer ids.
        dix = [self.stoi[s] for s in chunk]
        # x is the window; y is the same window shifted one char ahead.
        x = torch.tensor(dix[:-1], dtype=torch.long)
        y = torch.tensor(dix[1:], dtype=torch.long)
        return x, y
# -

block_size = 128 # spatial extent of the model for its context

# you can download this file at https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt
text = open('input.txt', 'r').read() # don't worry we won't run out of file handles
train_dataset = CharDataset(text, block_size) # one line of poem is roughly 50 characters

# Build an 8-layer, 8-head, 512-dim GPT sized to this character vocabulary.
from mingpt.model import GPT, GPTConfig
mconf = GPTConfig(train_dataset.vocab_size, train_dataset.block_size,
                  n_layer=8, n_head=8, n_embd=512)
model = GPT(mconf)

# +
from mingpt.trainer import Trainer, TrainerConfig

# initialize a trainer instance and kick off training
tconf = TrainerConfig(max_epochs=200, batch_size=512, learning_rate=6e-4,
                      lr_decay=True, warmup_tokens=512*20, final_tokens=200*len(train_dataset)*block_size,
                      num_workers=4)
trainer = Trainer(model, train_dataset, None, tconf)
trainer.train()

# +
# alright, let's sample some character-level shakespear
from mingpt.utils import sample

# Encode the prompt, add a batch dimension, and move it to the training device.
context = "O God, O God!"
x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device)
# Sample 2000 characters with temperature 0.9 and top-k 5 filtering.
y = sample(model, x, 2000, temperature=0.9, sample=True, top_k=5)[0]
completion = ''.join([train_dataset.itos[int(i)] for i in y])
print(completion)

# +
# well that was fun
play_char.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # T Tests on Independent Samples # # The **T Test** (also called **Student’s T Test**) compares two averages (means | $\mu$) and tells us if they are different from each other. The t test also tells us how **significant** the differences are. In other words it lets us know if those differences could have occurred by chance. # # A drug company may want to test a new cancer drug to find out if it improves life expectancy. In an experiment, there’s always a control group (a group who are given a placebo). The control group may see an average increased life expectancy of 5 years, while the group taking the new drug might witness an average increase in life expectancy of 6 years. This might indicate that the drug is effective, however that could be down to unique attributes in the sample. To test the efficacy of the drug for the entire population, researchers would have to use a Student’s t test to find out if the results are statistically significant over the whole population. # # # ### T score # The **T score** or **T value** is a **ratio** between the **difference between two groups** and the **difference within the two groups**. # # - **large** t-score $\implies$ groups are **different**. # - **small** t-score $\implies$ groups are **similar**. # # When we run a t test, the bigger the t value, the more likely it is that the results will be reproducable across the population. That raises the question **how big is big enough?** Every t-value has a **p-value** to go with it. A p-value is the **probability** that the results from your sample data occurred by chance. P-values range from 0% to 100%. a p value of 5% is 0.05. Low p-values are good; They indicate that our data did not occur by chance. 
# A p-value of .01 means there is only a 1% probability that the results from an experiment happened by chance. In most cases, a p-value of 0.05 (5%) is accepted to mean that the data is valid. This threshold (0.05 or 0.1) is called the **significance level** and is denoted by $\alpha$. $\alpha$ can be 0.05 or 0.1.
#
# ### T Test variations
#
# There are three variations of the t-test:
#
# - An **Independent samples** t-test compares the means for two groups.
# - A **Paired sample** t-test compares means from the same group at different times (say, one year apart).
# - A **One sample** t-test compares the mean of a single group against a known mean.
#
# ## Independent Samples T Test
#
# The **Independent samples** t-test helps us to compare the means of two sets of data. We could run a t test to see if the average math test scores of males and females are different and the Independent Samples T Test helps answer whether these differences could have occurred by random chance.

# ### Loading libraries and dataset

import os
import csv
import math
import time
import numpy as np
import pandas as pd
import seaborn as sn
from scipy import stats
import matplotlib.pyplot as plt
from itertools import combinations
from prettytable import PrettyTable

# NOTE(review): "\\data" assumes Windows path separators; os.path.join would be portable.
path = os.getcwd() + "\\data"

start = time.time()

csvFile = path + "\\NMttest.csv"

# Read the CSV into a dict mapping column name -> list of raw cell strings.
with open(csvFile) as fp:
    reader = csv.DictReader(fp)
    data = {}
    for row in reader:
        for header, value in row.items():
            try:
                data[header].append(value)
            except KeyError:
                # First occurrence of this column: start a fresh list.
                data[header] = [value]

# Columns have unequal lengths, so drop empty cells and coerce the rest to float.
for key, value in data.items():
    data[key] = list(filter(None, data[key]))
    data[key] = list(map(lambda x: float(x), data[key]))

#print("Time to read file ->", round(time.time() - start, 3), "seconds.\n")

# Show the raw samples side by side. The transpose pads shorter columns with
# missing values — hence the .dropna() calls throughout the analysis below.
t = PrettyTable(data.keys())
df = pd.DataFrame.from_dict(data, orient='index').transpose()
for index, row in df.iterrows():
    t.add_row(row)
print(t)

# + [markdown] variables={"len(data.keys())": "5"}
# This is a dataset of {{len(data.keys())}} independent numeric
# variables. They indicate the number of packages manufactured by each manufacturing shift for different number of days. So, `shiftClass31` indicates the number of packages produced by the morning shift on line 3. Each sample has a different number of entries and that is taken into account while performing the t-test.
# -

# ### Finding pairwise combinations for 2 sample t-tests

# + [markdown] variables={"len(data.keys())": "5"}
# The t-test can only be performed on 2 samples at a time. Since we are looking for the **significance relationship** between any pair of shifts, we will perform the t-test on each possible combination of the shifts. A combination is a selection of items from a collection, such that the order of selection does not matter. We can find a $k$-combination of a set $S$ as a subset of $k$ distinct elements of $S$. If the set has $n$ elements, the number of $k$-combinations is equal to $$\frac{n!}{k!(n-k)!}$$
# <br>
# In the case of a t-test, that resolves to $$\frac{p!}{2*(p-2)!}$$
# <br>
# where
# <br>
# $p =$ **Number of parameters**. In this case $p =$ {{len(data.keys())}}. Therefore, we can get the number of possible combinations as - $$\frac{5!}{2*(5-2)!} = 10$$
# -

# All unordered pairs of columns; each pair gets its own 2-sample t-test below.
featureCombos = (list(combinations(data.keys(),2)))
t2 = PrettyTable(['sample1', 'sample2'])
for elem in featureCombos:
    t2.add_row(elem)
print(t2)

# + [markdown] variables={"len(featureCombos)": "10"}
# As we can see above, we have found the {{len(featureCombos)}} different combinations between the features/parameters. We can now proceed on to performing the t-test between the 2 samples and evaluating if they have a **significance relationship**.
# + [markdown] variables={"featureCombos[2][0]": "shiftClass21", "featureCombos[2][1]": "shiftClass31"} # ### Visualizing the data # # Let us visualize one of these pairs of data say `{{featureCombos[2][0]}}` and `{{featureCombos[2][1]}}` # - sn.set_style("ticks") df[['shiftClass31','shiftClass21']].head() plt.figure(figsize=(16, 10), dpi= 80, facecolor='w', edgecolor='k') sn.distplot(df['shiftClass31'].dropna(), hist=False, rug=True) sn.distplot(df['shiftClass21'].dropna(), hist=False, rug=True) plt.show() # + [markdown] variables={"featureCombos[2][0]": "shiftClass21", "featureCombos[2][1]": "shiftClass31"} # This is a kernel density plot between `{{featureCombos[2][0]}}` and `{{featureCombos[2][1]}}`. Taking a look at the plot, it seems that the two variables have very different value ranges. # - plt.figure(figsize=(16, 10), dpi= 80, facecolor='w', edgecolor='k') df.boxplot(column=['shiftClass31', 'shiftClass21']) plt.show() if 'shiftClass21' in df.columns: print(df[['shiftClass21']]) # ### Calculating the T - Value # + [markdown] variables={"featureCombos[2][0]": "shiftClass21", "featureCombos[2][1]": "shiftClass31"} # Let us consider the numerical vectors `{{featureCombos[2][0]}}` and `{{featureCombos[2][1]}}`. We can calculate the T-Value for this pair in the following way. # + [markdown] variables={"featureCombos[2][0]": "shiftClass21", "featureCombos[2][1]": "shiftClass31"} # #### Step 1. Sum the two groups # # Let - <br>$A = $ `{{featureCombos[2][1]}}`<br> $B = $ `{{featureCombos[2][0]}}` # # $$sum(A) = \sum_{i=1}^{N_A} A $$ # # $$sum(B) = \sum_{i=1}^{N_B} B $$ # - A = featureCombos[2][1] B = featureCombos[2][0] sumA = sum(df[[A]].dropna().values)[0] sumB = sum(df[[B]].dropna().values)[0] print("sum(A)\t:\t", sumA) print("sum(B)\t:\t", sumB) # #### Step 2. 
# Square the sums of the two groups
#
# $$sumSq(A) = (\sum_{i=1}^{N_A} A)^2 $$
#
# $$sumSq(B) = (\sum_{i=1}^{N_B} B)^2 $$

# Squared column totals — the "correction" terms of the pooled variance.
sumAsq = sumA ** 2
sumBsq = sumB ** 2
print("sumSq(A)\t:\t", sumAsq)
print("sumSq(B)\t:\t", sumBsq)

# #### Step 3. Calculate the means of the two groups
#
# $$\mu(A) = \dfrac {\sum ^{N_A}_{i=1}A}{N_A} $$
#
# $$\mu(B) = \dfrac {\sum ^{N_B}_{i=1}B}{N_B} $$

avgA = sumA/len(df[[A]].dropna().values)
avgB = sumB/len(df[[B]].dropna().values)
print("mean(A)\t:\t", avgA)
print("mean(B)\t:\t", avgB)

# #### Step 4. Sum the squares of each instance
#
# $$ssq(A) = \sum_{i=1}^{N_A} A_{i}^2 $$
#
# $$ssq(B) = \sum_{i=1}^{N_B} B_{i}^2 $$

# Element-wise squares summed; the result is a length-1 array, hence the [0]
# indexing in the prints and in the t-value formula below.
ssqA = sum(map(lambda x: x ** 2, df[[A]].dropna().values))
ssqB = sum(map(lambda x: x ** 2, df[[B]].dropna().values))
print("ssq(A)\t:\t", ssqA[0])
print("ssq(B)\t:\t", ssqB[0])

# #### Step 5. Find the Degrees of Freedom
#
# $$DF = N_A + N_B - 2$$

degreesFreedom = len(df[[A]].dropna().values) + len(df[[B]].dropna().values) - 2
print("Degrees of Freedom\t:\t", degreesFreedom)

# #### Step 6.
# Plug in the pre-calculated values to find the T-Value
#
# $$ t = \dfrac{\mu_A - \mu_B}{\sqrt{\dfrac{(ssq(A) - \dfrac{sumSq(A)}{N_A}) + (ssq(B) - \dfrac{sumSq(B)}{N_B})}{DF}.(\dfrac{1}{N_A} + \dfrac{1}{N_B}) }} $$

# Classic pooled-variance (equal-variance) two-sample t statistic.
# NOTE(review): this rebinds `t`, clobbering the PrettyTable created earlier.
t = (avgA - avgB)/math.sqrt((((ssqA[0] - sumAsq/len(df[[A]].dropna().values))+(ssqB[0] - sumBsq/len(df[[B]].dropna().values)))/degreesFreedom)*(1.0/len(df[[A]].dropna().values)+1.0/len(df[[B]].dropna().values)))
print("T-Value\t:\t", t)

# ### Calculating the P - Value

# Two-tailed p-value via the t distribution's survival function.
pval = stats.t.sf(abs(t), degreesFreedom) * 2
print("p-Value\t:\t", pval)

# + [markdown] variables={"degreesFreedom": "46", "featureCombos[2][0]": "shiftClass21", "featureCombos[2][1]": "shiftClass31", "round(pval, 4)": "0.005", "round(t, 4)": "-2.9462"}
# Therefore, we can see that for `{{featureCombos[2][1]}}` and `{{featureCombos[2][0]}}` -
# * Degrees of Freedom $ = $ `{{degreesFreedom}}`
# * t-value $ = $ `{{round(t, 4)}}`
# * p-value $ = $ `{{round(pval, 4)}}`
# * $\alpha = $ `0.05`
#
# Now, given that our calculated p-value is $ < \alpha$, we can safely conclude that the means of `{{featureCombos[2][1]}}` and `{{featureCombos[2][0]}}` are **significantly** different from one another.
# -

# Flag significance at the alpha = 0.05 level.
sigFlag = 0
if(pval < 0.05):
    sigFlag = 1

t2 = PrettyTable(['sample1', 'sample2', 't_value', 'degrees_freedom', 'p_value', 'significant'])
r = [featureCombos[2][1], featureCombos[2][0], round(t, 4), degreesFreedom, round(pval, 4), sigFlag]
t2.add_row(r)
print(t2)

# ### Pairwise 2-sample t-tests
# We can now run a general script that calculates the pairwise p-values for each combination of features.
# +
def ttest(combo, a, b):
    """Pooled-variance (equal-variance) two-sample t-test.

    combo : pair of column names — used only to label the output row.
    a, b  : the two numeric samples (lists of floats).

    Returns [name_a, name_b, t rounded to 5 dp, degrees of freedom,
             two-tailed p rounded to 6 dp, 1 if p < 0.05 else 0].
    """
    nA = len(a)
    nB = len(b)
    totalA = float(sum(a))
    totalB = float(sum(b))
    meanA = totalA/nA
    meanB = totalB/nB
    # Sum of squared observations; subtracting (sum)^2/n gives each group's
    # sum of squared deviations, and dividing their total by DF pools the variance.
    squaresA = sum(map(lambda x: x ** 2, a))
    squaresB = sum(map(lambda x: x ** 2, b))
    dof = nA + nB - 2
    pooled = ((squaresA - (totalA ** 2)/nA) + (squaresB - (totalB ** 2)/nB))/dof
    tval = (meanA - meanB)/math.sqrt(pooled*(1.0/nA + 1.0/nB))
    # Two-tailed p-value via the t distribution's survival function.
    pval = stats.t.sf(abs(tval), dof)*2
    return [combo[0], combo[1], round(tval, 5), dof, round(pval, 6), int(pval < 0.05)]
# -

# Run the test on every pair of shifts and tabulate the outcomes.
t2 = PrettyTable(['sample1', 'sample2', 't_value', 'degrees_freedom', 'p_value', 'significant'])
for left, right in featureCombos:
    t2.add_row(ttest((left, right), df[left].dropna().tolist(), df[right].dropna().tolist()))
print(t2)
significance-tests/t-test/ttest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/fertorresfs/Analyzing-Video-with-OpenCV-and-NumPy/blob/master/BERT_Quora_Dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="WYJj7y2uT5le"
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

import os

# Pin the notebook to the first visible CUDA device.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0";

# + [markdown] id="MP6qBSzJvJSv"
# # **Assignment presented for the course SCC5871 - Machine Learning Algorithms**
#
# Group 6
#
# 11919610 - <NAME>
#

# + [markdown] id="I-lfLCFHSXmV"
# ### Download and installation of kaggle
# Tutorial: https://www.kaggle.com/general/74235

# + colab={"base_uri": "https://localhost:8080/", "height": 463} id="9hVA30hxQVea" outputId="f4f42490-4542-4363-c718-df0f8ae02875"
# Install the Kaggle CLI and upload the user's kaggle.json API token.
# ! pip install -q kaggle
from google.colab import files
files.upload()
# ! mkdir ~/.kaggle
# ! cp kaggle.json ~/.kaggle/
# ! chmod 600 ~/.kaggle/kaggle.json

#Check if everything is OK!
# ! kaggle datasets list

# + [markdown] id="vM544XF0sMlP"
# ### Downloading the dataset

# + [markdown] id="ENyoaR6RC20W"
# **Data:** Quora Question Pairs - https://www.kaggle.com/c/quora-question-pairs/data

# + colab={"base_uri": "https://localhost:8080/"} id="M4yTXVGQvGhp" outputId="c0d7ec3e-f4f5-4b16-ee95-db5d6e4480b3"
# !kaggle competitions download -c quora-question-pairs
# !unzip train.csv.zip -d train

# + [markdown] id="rShJOK4j2Rhe"
# # **Requirements**

# + [markdown] id="ibR9nVgfsjge"
# ### Import Libs

# + colab={"base_uri": "https://localhost:8080/"} id="hZaCyFb6r-W4" outputId="62d09eed-47e6-4102-a018-419077c77b23"
# Importing libraries
import pandas as pd
# !pip install ktrain
import ktrain
from ktrain import text

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="5Z_EjZX0sGVT" outputId="8abbc7cd-3ed6-4b52-f528-35995d0d331c"
dataset = pd.read_csv('/content/train/train.csv')
# Drop rows with missing questions/labels before sampling.
dataset = dataset.dropna()
dataset

# + [markdown] id="IzmPv3Kl2PZq"
# # Reducing the data and splitting into train and test sets

# + id="VmFe3a1YIARr"
dataset_size = 200000
train_size = dataset_size * 0.3 #30% for training
# test_size = dataset_size - train_size

# Select the first 100k non-duplicate pairs
dif = dataset.loc[dataset.is_duplicate == 0]
dif_part = dif.iloc[:int(dataset_size/2),:]
train_dif_part = dif_part.iloc[:int(train_size/2),:]
test_dif_part = dif_part.iloc[int(train_size/2):,:]

# Select the first 100k duplicate pairs
equal = dataset.loc[dataset.is_duplicate == 1]
equal_part = equal.iloc[:int(dataset_size/2),:]
train_equal_part = equal_part.iloc[:int(train_size/2),:]
test_equal_part = equal_part.iloc[int(train_size/2):,:]

# Concatenate the two subsets — keeps both splits class-balanced.
train_df = pd.concat([train_dif_part,train_equal_part])
test_df = pd.concat([test_dif_part,test_equal_part])

# + colab={"base_uri": "https://localhost:8080/"} id="o5hQmenLO2UV" outputId="fae69be9-c92a-49af-d8e0-05700ccadee4"
x_train = train_df[['question1', 'question2']].values
y_train = train_df['is_duplicate'].values
x_test = test_df[['question1', 'question2']].values
y_test = test_df['is_duplicate'].values

# # IMPORTANT: data format for sentence pair classification is list of tuples of form (str, str)
x_train = list(map(tuple, x_train))
x_test = list(map(tuple, x_test))

# Spot-check a single test label.
y_test[36252]

# + colab={"base_uri": "https://localhost:8080/"} id="GzAYbfwOGL4z" outputId="1ca126d2-d48e-41d5-f5f6-75ddcb2fbaf9"
print(x_train[0])
print(y_train[0])

# + colab={"background_save": true, "base_uri": "https://localhost:8080/", "height": 417} id="kKCjvMoLGlvY" outputId="88e026c0-d6ab-4278-84ad-a99134f90cf2"
# Fine-tune bert-base-uncased as a sentence-pair classifier via ktrain.
MODEL_NAME = 'bert-base-uncased'
t = text.Transformer(MODEL_NAME, maxlen=128, class_names=['non-duplicate', 'duplicate'])
trn = t.preprocess_train(x_train, y_train)
val = t.preprocess_test(x_test, y_test)
model = t.get_classifier()
learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32) # lower bs if OOM occurs

# tests should use learning rates smaller than 5, since according to the professor
# the best results are obtained that way
learner.fit_onecycle(5e-5, 3)
#learner.fit_onecycle(4e-4, 3)
#learner.fit_onecycle(3e-4, 3)
#learnet.fit_onecycle(1e-3, 3)

# + id="KcTH0prkPj5y"
learner.validate(class_names=t.get_classes())

# + [markdown] id="eH273wOnnPrq"
# ------- 5e-5, 3 ------ (ONECYCLE RESULTS)
# begin training using onecycle policy with max lr of 5e-05...
# Epoch 1/3
# 1875/1875 [==============================] - 4853s 3s/step - loss: 0.4045 - accuracy: 0.8132 - val_loss: 0.3263 - val_accuracy: 0.8612
# Epoch 2/3
# 1875/1875 [==============================] - 4863s 3s/step - loss: 0.2737 - accuracy: 0.8885 - val_loss: 0.2992 - val_accuracy: 0.8748
# Epoch 3/3
# 1875/1875 [==============================] - 4871s 3s/step - loss: 0.1285 - accuracy: 0.9544 - val_loss: 0.3519 - val_accuracy: 0.8813
# (VALIDATE RESULTS)
#
# ---------------------------------------------------------------------------
# learner.fit_onecycle(5e-4, 3)
# begin training using onecycle policy with max lr of 0.0005...
# Epoch 1/3
# 1875/1875 [==============================] - 2755s 1s/step - loss: 0.5757 - accuracy: 0.6462 - val_loss: 0.6940 - val_accuracy: 0.5000
# Epoch 2/3
# 1875/1875 [==============================] - 2752s 1s/step - loss: 0.6953 - accuracy: 0.4990 - val_loss: 0.6932 - val_accuracy: 0.5000
# Epoch 3/3
# 1875/1875 [==============================] - 2740s 1s/step - loss: 0.6935 - accuracy: 0.5007 - val_loss: 0.6932 - val_accuracy: 0.5000
# <tensorflow.python.keras.callbacks.History at 0x7f9e51281b70>
#
#                precision    recall  f1-score   support
#
# non-duplicate       0.00      0.00      0.00     70000
#     duplicate       0.50      1.00      0.67     70000
#
#      accuracy                           0.50    140000
#     macro avg       0.25      0.50      0.33    140000
#  weighted avg       0.25      0.50      0.33    140000
#
# /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
# _warn_prf(average, modifier, msg_start, len(result)) # array([[ 0, 70000], # [ 0, 70000]]) # --------------------------------------------------------------------------- # begin training using onecycle policy with max lr of 5e-05... # Epoch 1/3 # 1875/1875 [==============================] - 2642s 1s/step - loss: 0.3981 - accuracy: 0.8178 - val_loss: 0.3220 - val_accuracy: 0.8608 # Epoch 2/3 # 1875/1875 [==============================] - 2647s 1s/step - loss: 0.2785 - accuracy: 0.8860 - val_loss: 0.3055 - val_accuracy: 0.8705 # Epoch 3/3 # 1875/1875 [==============================] - 2652s 1s/step - loss: 0.1325 - accuracy: 0.9543 - val_loss: 0.3516 - val_accuracy: 0.8796 # # <tensorflow.python.keras.callbacks.History at 0x7f859126fc88> # # precision recall f1-score support # # non-duplicate 0.89 0.87 0.88 70000 # duplicate 0.87 0.89 0.88 70000 # # accuracy 0.88 140000 # macro avg 0.88 0.88 0.88 140000 # weighted avg 0.88 0.88 0.88 140000 # # array([[60604, 9396], # [ 7454, 62546]]) # # # + [markdown] id="OTLC1wf762Ix" # # **Plotando a perda do modelo** # + id="lhQZkJHm60QD" learner.plot() # + id="8lSJLH6jBR3n" predictor = ktrain.get_predictor(learner.model, t) # + id="w84za3TcLtqP" y_test[69980:70010] # + id="y02TqjD3BR3w" positive = x_test[69980] negative = x_test[70010] # + id="Hc9YtG07BR3y" print('Duplicate:\n%s' %(positive,)) # + id="vhwHvbkzBR30" print('Non-Duplicate:\n%s' %(negative,)) # + id="gXw0prDhBR32" predictor.predict(positive) # + id="O60Yiq5hM2Zs" predictor.predict(negative) # + id="pmarVlV6M6rl" predictor.predict([positive, negative]) # + id="HsI9Wpy0M_iT" predictor.save('/tmp/mrpc_model') # + id="U0c-_3GqNI87" p = ktrain.load_predictor('/tmp/mrpc_model') # + id="K9tZhJPLNMUT" p.predict(positive)
BERT_Quora_Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Captum Visual Insights for BERT Seq Classification Model

# This Notebook helps you to get started with the Captum Insights. The example covered here is from the Huggingface Transformers pre-trained model used in Torchserve. In order to understand the word importances and attributions when we make an Explanation Request, we use Captum Insights

import json
import captum
from captum.attr import visualization as viz
import os
import logging
import numpy as np
import torch
from transformers import AutoTokenizer

# Create the mar file for the BERTSeqClassification model and place the artefacts from where you can serve the model.
#
# !torch-model-archiver --model-name bert_explain --version 1.0 --serialized-file Transformer_model/pytorch_model.bin --handler Transformer_model/Transformer_handler_generalized.py --extra-files "Transformer_model/config.json,Transformer_model/setup_config.json,Transformer_model/index_to_name.json,Transformer_model/vocab.txt"

#curl request to make a Prediction Request
# !curl -H "Content-Type: application/json" --data @examples/Huggingface_Transformers/bert_ts.json http://127.0.0.1:8080/predictions/bert_explain

# Make a Explanation response for the bert model by specifying body in the service envelope in the config.properties file, like below:
#
# ```
# service_envelope=body
# ```

# When a json file is passed as a request format to the curl, Torchserve unwraps the json file from the request body. This is the reason for specifying service_envelope=body in the config.properties file

#curl command to make an Explanation Request and the response is as below:
#bert_ts.json file contains the input for the inference request
# !curl -H "Content-Type: application/json" --data @examples/Huggingface_Transformers/bert_ts.json http://127.0.0.1:8080/explanations/bert_explain

# The above explanation response is present in the bert_response.json. From the BERT_Response.json file the attributions, importances and delta key-value pair are loaded

# Use a context manager so the response file is closed as soon as it is parsed
# (the original kept the handle open for the life of the kernel).
with open('./bert_response.json', 'r') as input_file:
    input_json = json.load(input_file)

# First explanation record: per-token attribution scores, the tokens
# themselves, and the convergence delta reported in the response.
attributions = input_json['explanations'][0]['importances']
words = input_json['explanations'][0]['words']
delta = input_json['explanations'][0]['delta']

# For visualization purpose using Captum, the attributions and delta parameters should be in the form of Torch Tensors.
# Making the arguments ready to be passed on to the VisualizationDataRecord method. The argument for predictions is given as 1 (a random probability) in the VisualizationDataRecord method since it is not mandatory

# +
attributions = torch.tensor(attributions)
predictions = 1
label = 'Not Accepted'
true_label = 'Accepted'
# -

# Using the Visualization Data Record method from Captum's Visualization toolkit to render the visualization

# +
result = viz.VisualizationDataRecord(
    attributions,
    predictions,
    label,
    true_label,
    label,
    attributions.sum(),  # total attribution across the sequence
    words,
    delta)
# -

viz.visualize_text([result])

import IPython.display
IPython.display.Image(filename="bert_captum_visualization.png")
captum/Captum_visualization_for_bert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MarkMark import lltk import plotnine as p9 # load corpus C=lltk.load('MarkMark') # get some basic info C.info() # ## Install # ### From pre-compiled zips # # Only metadata and 1-gram counts are made available via download. C.download(parts=['metadata','freqs','txt'], force=False) # change force to True to redownload # ### Compile from sources # # Compile metadata and 1-gram counts from ARTFL website. Does not work unless you have institutional access to ARTFL. # + # C.compile(force=False) # set force to True to overwrite existing meta/data # - # ## Preprocessing # + # C.save_freqs?? # - C.preprocess_freqs(num_proc=4, force=True) C.save_dtm() C.dtm(n=100, excl_stopwords=True)
notebooks/test_freqs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + [markdown] deletable=true editable=true
# ## This notebook is used to generate the finalized version of the classifier, to simply feature transformation into the final form, and to test that the results are the same
#
# Most of the code comes from operational_classifier.

# + deletable=true editable=true
import pandas as pd
import numpy as np
import pickle
import sys
#reload(sys)
#sys.setdefaultencoding("utf-8")

#Loading raw data
# Use a context manager so the pickle file handle is closed after loading
# (the original passed an anonymous open() that was never closed).
with open("../Data/multiclass_tweets_indexed.p",'rb') as pfile:
    df = pickle.load(pfile)
tweets = df.text

# + [markdown] deletable=true editable=true
# ## Feature generation

# + deletable=true editable=true
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.stem.porter import *
import string
import re

# NLTK English stopwords plus Twitter-specific tokens we never want as features.
# (The original had a redundant double assignment here: `stopwords=stopwords = ...`.)
stopwords = nltk.corpus.stopwords.words("english")

other_exclusions = ["#ff", "ff", "rt"]
stopwords.extend(other_exclusions)

stemmer = PorterStemmer()


def preprocess(text_string):
    """
    Accepts a text string and:
    1) removes urls,
    2) collapses runs of whitespace to one space,
    3) removes @-mentions.

    This standardizes the text without caring about the
    specific urls or people mentioned.
    """
    space_pattern = '\s+'
    giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
        '[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = '@[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    parsed_text = re.sub(giant_url_regex, '', parsed_text)
    parsed_text = re.sub(mention_regex, '', parsed_text)
    #parsed_text = parsed_text.code("utf-8", errors='ignore')
    return parsed_text

def tokenize(tweet):
    """Removes punctuation & excess whitespace, sets to lowercase,
    and stems tweets. Returns a list of stemmed tokens."""
    tweet = " ".join(re.split("[^a-zA-Z]*", tweet.lower())).strip()
    #tokens = re.split("[^a-zA-Z]*", tweet.lower())
    tokens = [stemmer.stem(t) for t in tweet.split()]
    return tokens

def basic_tokenize(tweet):
    """Same as tokenize but without the stemming"""
    tweet = " ".join(re.split("[^a-zA-Z.,!?]*", tweet.lower())).strip()
    return tweet.split()

# Word-level 1-3 gram tf-idf features over the preprocessed, stemmed tweets.
vectorizer = TfidfVectorizer(
    #vectorizer = sklearn.feature_extraction.text.CountVectorizer(
    tokenizer=tokenize,
    preprocessor=preprocess,
    ngram_range=(1, 3),
    stop_words=stopwords,
    use_idf=True,
    smooth_idf=False,
    norm=None, #No normalization: raw tf*idf values are kept
    decode_error='replace',
    max_features=10000,
    min_df=5,
    max_df=0.501
    )

# + deletable=true editable=true
#Construct tfidf matrix and get relevant scores
tfidf = vectorizer.fit_transform(tweets).toarray()
vocab = {v:i for i, v in enumerate(vectorizer.get_feature_names())}
idf_vals = vectorizer.idf_
idf_dict = {i:idf_vals[i] for i in vocab.values()} #keys are indices; values are IDF scores

# + deletable=true editable=true
#Get POS tags for tweets and save as a string
tweet_tags = []
for t in tweets:
    tokens = basic_tokenize(preprocess(t))
    tags = nltk.pos_tag(tokens)
    tag_list = [x[1] for x in tags]
    #for i in range(0, len(tokens)):
    tag_str = " ".join(tag_list)
    tweet_tags.append(tag_str)
    #print(tokens[i],tag_list[i])

# + deletable=true editable=true
#We can use the TFIDF vectorizer to get a token matrix for the POS tags
pos_vectorizer = TfidfVectorizer(
    tokenizer=None,
    lowercase=False,
    preprocessor=None,
    ngram_range=(1, 3),
    stop_words=None,
    use_idf=False,
    smooth_idf=False,
    norm=None, #No normalization: raw term frequencies are kept
    decode_error='replace',
    max_features=5000,
    min_df=5,
    max_df=0.501,
    )

# + deletable=true editable=true
#Construct POS TF matrix and get vocab dict
pos = pos_vectorizer.fit_transform(pd.Series(tweet_tags)).toarray()
pos_vocab = {v:i for i, v in enumerate(pos_vectorizer.get_feature_names())}

# + deletable=true editable=true
#Now get other features
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as VS
from textstat.textstat import *

sentiment_analyzer = VS()

def count_twitter_objs(text_string):
    """
    Accepts a text string and replaces:
    1) urls with URLHERE
    2) lots of whitespace with one instance
    3) mentions with MENTIONHERE
    4) hashtags with HASHTAGHERE

    This allows us to get standardized counts of urls and mentions
    Without caring about specific people mentioned.

    Returns counts of urls, mentions, and hashtags.
    """
    space_pattern = '\s+'
    giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
        '[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = '@[\w\-]+'
    hashtag_regex = '#[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)
    parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
    parsed_text = re.sub(hashtag_regex, 'HASHTAGHERE', parsed_text)
    return(parsed_text.count('URLHERE'),parsed_text.count('MENTIONHERE'),parsed_text.count('HASHTAGHERE'))

def other_features(tweet):
    """This function takes a string and returns a list of features.
    These include Sentiment scores, Text and Readability scores,
    as well as Twitter specific features"""
    ##SENTIMENT
    sentiment = sentiment_analyzer.polarity_scores(tweet)

    words = preprocess(tweet) #Get text only

    syllables = textstat.syllable_count(words) #count syllables in words
    # NOTE(review): `words` is a string, so this iterates characters (len(w)==1 each)
    # and equals len(words); splitting into words first was probably intended.
    num_chars = sum(len(w) for w in words) #num chars in words
    num_chars_total = len(tweet)
    num_terms = len(tweet.split())
    num_words = len(words.split())
    avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)
    num_unique_terms = len(set(words.split()))

    ###Modified FK grade, where avg words per sentence is just num words/1
    FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)
    ##Modified FRE score, where sentence fixed to 1
    FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)

    twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://
    # NOTE(review): substring test — also matches words that merely contain "rt".
    retweet = 0
    if "rt" in words:
        retweet = 1
    features = [FKRA, FRE, syllables, avg_syl, num_chars, num_chars_total, num_terms,
                num_words, num_unique_terms, sentiment['neg'], sentiment['pos'],
                sentiment['neu'], sentiment['compound'],
                twitter_objs[2], twitter_objs[1], twitter_objs[0], retweet]
    #features = pandas.DataFrame(features)
    return features

def get_feature_array(tweets):
    # Apply other_features to each tweet and stack into a 2-D numpy array.
    feats=[]
    for t in tweets:
        feats.append(other_features(t))
    return np.array(feats)

# + deletable=true editable=true
# Human-readable names for the columns produced by other_features (same order).
other_features_names = ["FKRA", "FRE","num_syllables", "avg_syl_per_word", "num_chars", "num_chars_total",
                        "num_terms", "num_words", "num_unique_words", "vader neg","vader pos","vader neu",
                        "vader compound", "num_hashtags", "num_mentions", "num_urls", "is_retweet"]

# + deletable=true editable=true
feats = get_feature_array(tweets)

# + deletable=true editable=true
#Now join them all up
M = np.concatenate([tfidf,pos,feats],axis=1)

# + deletable=true editable=true
M.shape

# + deletable=true editable=true
#Finally get a list of variable names
variables = ['']*len(vocab)
for k,v in vocab.iteritems():
    variables[v] = k
pos_variables = ['']*len(pos_vocab)
for k,v in pos_vocab.iteritems():
    pos_variables[v] = k

# Full, ordered feature-name list matching the columns of M.
feature_names = variables+pos_variables+other_features_names

# + [markdown] deletable=true editable=true
# # Running the model
#
# This model was found using a GridSearch with 5-fold cross validation. Details are in the notebook operational_classifier.

# + deletable=true editable=true
X = pd.DataFrame(M)
y = df['class'].astype(int)

# + deletable=true editable=true
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC

# + deletable=true editable=true
# L1-penalized logistic regression used purely as a sparse feature selector.
select = SelectFromModel(LogisticRegression(class_weight='balanced',penalty="l1",C=0.01))
X_ = select.fit_transform(X,y)

# + deletable=true editable=true
# Final linear SVM trained on the selected features only.
model = LinearSVC(class_weight='balanced',C=0.01, penalty='l2', loss='squared_hinge',multi_class='ovr').fit(X_, y)

# + deletable=true editable=true
# NOTE(review): predictions are on the training data, so the report below
# measures fit, not generalization.
y_preds = model.predict(X_)

# + deletable=true editable=true
report = classification_report( y, y_preds )

# + deletable=true editable=true
print(report)

# + [markdown] deletable=true editable=true
# # Using information from the model to obtain the matrix X_ generically
#
# This is the most difficult task: We have to take the inputs tweets and transform them into a format that can be used in the model without going through all the same pre-processing steps as above. This can be done as follows.
# + [markdown] deletable=true editable=true # ## Obtaining information about the model # + deletable=true editable=true final_features = select.get_support(indices=True) #get indices of features final_feature_list = [unicode(feature_names[i]) for i in final_features] #Get list of names corresponding to indices # + deletable=true editable=true print final_feature_list # + deletable=true editable=true #Getting names for each class of features ngram_features = final_feature_list[:final_feature_list.index('yr')+1] pos_features = final_feature_list[final_feature_list.index('yr')+1:final_feature_list.index('VBD')+1] oth_features = final_feature_list[final_feature_list.index('VBD')+1:] # + deletable=true editable=true # + [markdown] deletable=true editable=true # ## Generating ngram features # + deletable=true editable=true new_vocab = {v:i for i, v in enumerate(ngram_features)} new_vocab_to_index = {} for k in ngram_features: new_vocab_to_index[k] = vocab[k] # + deletable=true editable=true #Get indices of text features ngram_indices = final_features[:len(ngram_features)] # + deletable=true editable=true #TODO: Pickle new vectorizer # + deletable=true editable=true new_vectorizer = TfidfVectorizer( #vectorizer = sklearn.feature_extraction.text.CountVectorizer( tokenizer=tokenize, preprocessor=preprocess, ngram_range=(1, 3), stop_words=stopwords, #We do better when we keep stopwords use_idf=False, smooth_idf=False, norm=None, #Applies l2 norm smoothing decode_error='replace', min_df=1, max_df=1.0, vocabulary=new_vocab ) # - from sklearn.externals import joblib joblib.dump(new_vectorizer, 'final_tfidf.pkl') # + deletable=true editable=true tfidf_ = new_vectorizer.fit_transform(tweets).toarray() # + deletable=true editable=true #Verifying that results are the same # + deletable=true editable=true tfidf_[1,:] # + deletable=true editable=true tfidf_[1,:].sum() # + deletable=true editable=true X_[1,:tfidf_.shape[1]] # + deletable=true editable=true X_[1,:tfidf_.shape[1]].sum() 
# + [markdown] deletable=true editable=true
# Results are the same if use IDF but the problem is that IDF will be different if we use different data. Instead we have to use the original IDF scores and multiply them by the new matrix.

# + deletable=true editable=true


# + deletable=true editable=true
# IDF weights of the surviving n-gram features, in the reduced column order.
idf_vals_ = idf_vals[ngram_indices]

# + deletable=true editable=true
idf_vals_.shape

# + deletable=true editable=true
#TODO: Pickle idf_vals
joblib.dump(idf_vals_, 'final_idf.pkl')

# + deletable=true editable=true
(tfidf_[1,:]*idf_vals_) == X_[1,:153] #Got same value as final process array!

# + deletable=true editable=true
tfidf_*idf_vals_ == X_[:,:153]

# + deletable=true editable=true
# TF matrix scaled by the stored IDF weights reproduces the original tf-idf columns.
tfidffinal = tfidf_*idf_vals_

# + [markdown] deletable=true editable=true
# ## Generating POS features
# This is simpler as we do not need to worry about IDF but it will be slower as we have to compute the POS tags for the new data. Here we can simply use the old POS tags.

# + deletable=true editable=true
new_pos = {v:i for i, v in enumerate(pos_features)}

# + deletable=true editable=true
#TODO: Pickle pos vectorizer
#We can use the TFIDF vectorizer to get a token matrix for the POS tags
new_pos_vectorizer = TfidfVectorizer(
    tokenizer=None,
    lowercase=False,
    preprocessor=None,
    ngram_range=(1, 3),
    stop_words=None,
    use_idf=False,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    min_df=1,
    max_df=1.0,
    vocabulary=new_pos
    )
# -

joblib.dump(new_pos_vectorizer, 'final_pos.pkl')

# + deletable=true editable=true
pos_ = new_pos_vectorizer.fit_transform(tweet_tags).toarray()

# + deletable=true editable=true
pos_[1,:]

# + deletable=true editable=true
X_[1,153:159]

# + deletable=true editable=true
pos_[:,:] == X_[:,153:159]

# + deletable=true editable=true
pos_[:,:].sum()

# + deletable=true editable=true
X_[:,153:159].sum()

# + [markdown] deletable=true editable=true
# ## Finally, we can look at the other features

# + deletable=true editable=true
print other_features_names

# + deletable=true editable=true
print oth_features

# + [markdown] deletable=true editable=true
# The functions can be modified to only calculate and return necessary fields.

# + deletable=true editable=true
def other_features_(tweet):
    """This function takes a string and returns a list of features.
    These include Sentiment scores, Text and Readability scores,
    as well as Twitter specific features"""
    ##SENTIMENT
    sentiment = sentiment_analyzer.polarity_scores(tweet)

    words = preprocess(tweet) #Get text only

    syllables = textstat.syllable_count(words) #count syllables in words
    # NOTE(review): as in other_features above, this sums 1 per character of the
    # string and so equals len(words).
    num_chars = sum(len(w) for w in words) #num chars in words
    num_chars_total = len(tweet)
    num_terms = len(tweet.split())
    num_words = len(words.split())
    avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)
    num_unique_terms = len(set(words.split()))

    ###Modified FK grade, where avg words per sentence is just num words/1
    FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)
    ##Modified FRE score, where sentence fixed to 1
    FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)

    twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://
    # Only the fields actually selected by the model are returned here.
    features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,
                num_unique_terms, sentiment['compound'],
                twitter_objs[2], twitter_objs[1],]
    #features = pandas.DataFrame(features)
    return features

def get_feature_array_(tweets):
    # Apply other_features_ to each tweet and stack into a 2-D numpy array.
    feats=[]
    for t in tweets:
        feats.append(other_features_(t))
    return np.array(feats)

# + deletable=true editable=true
feats_ = get_feature_array_(tweets)

# + deletable=true editable=true
feats_[0,:]

# + deletable=true editable=true
X_[0,159:]

# + deletable=true editable=true
feats_[:,:] == X_[:,159:]

# + [markdown] deletable=true editable=true
# ## Now that we have put it all together using a simplified process we can assess if these new data return the same
answers. # + deletable=true editable=true M_ = np.concatenate([tfidffinal, pos_, feats_],axis=1) # + deletable=true editable=true M_.shape # + deletable=true editable=true X__ = pd.DataFrame(M_) # + deletable=true editable=true y_preds_ = model.predict(X__) # + deletable=true editable=true report = classification_report( y, y_preds_ ) # + deletable=true editable=true print(report) # + [markdown] deletable=true editable=true # OK. So now that we have verified that the results are the same with X_ and X__ we can implement a script that can transform new data in this manner. # + deletable=true editable=true
classifier/final_classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Tóm tắt # Chúng tôi đề xuất xây dựng hệ thống rút tin tức từ Internet và thống kê các từ khóa chính xuất hiện theo thời gian thực. Cơ sở dữ liệu căn bản của chúng tôi dùng là Postgresql, kết hợp với các tiện ích khác để xây dựng hệ thống lưu trữ phân tán và cơ sở dữ liệu chuỗi thời gian giúp đáp ứng được các yêu cầu cơ bản. Hệ thống được đề xuất tuy đơn giản nhưng có khả năng đáp ứng được các yêu cầu của dữ liệu lớn và dễ dàng mở rộng khi cần. # ### Giới thiệu # Rút tin (Web scraping) là quá trình xử lý, rút trích thông tin tự động từ các trang trên Internet để tổng hợp và làm giàu nguồn dữ liệu cho chính ứng dụng hiện tại. Một hệ thống rút tin đơn giản bao gồm frontier chứa các url cần được xử lý. Các url sẽ được xử lý tuần tự hoặc theo một nguyên tắc nào đó và rút các nội dung từ thẻ html. Các nội dung sẽ được thêm vào một cơ sở dữ liệu lưu trữ cùng với các url mới được tiếp tục thêm vào frontier. # # CSDL quan hệ (SQL) được ra mắt từ rất sớm và đem lại nhiều lợi ích cho con người. Nhưng khi nhu cầu khai thác dữ liệu tăng cao, các CSDL phi quan hệ (NoSQL) khi đó ra đời như MapReduce (1), Google Bigtable (2), Cassandra (3), Mongodb (4),... đem lại nhiều tiện ích khiến chúng ta dần quên đi SQL trong các ứng dụng. Phải mất một thời gian, các CSDL quan hệ được mới tiếp tục được sử dụng nhiều trở lại bởi các dự án mã nguồn mở đã làm cho chúng ngày càng mềm dẻo. # # # Trong nhiều trường hợp, việc sử dụng các NoSQL là không cần thiết và thậm chí còn gây lãng phí bởi các CSDL quan hệ ngày nay đã đáp ứng được các dữ liệu phi cấu trúc. Postgresql sử dụng những kiểu dữ liệu cơ bản của SQL thông thường cộng thêm một số kiểu dữ liệu phi cấu trúc làm cho nó trở nên tiện dụng. 
Ngoài ra, các tiện ích mở rộng cũng như bản quyền Postgres đã giúp cho nó phát triển nhanh và trở thành CSDL được yêu thích nhất năm 2019 (5). Chính vì các điểm trên, chúng tôi đề xuất sử dụng Postgresql làm cơ sở dữ liệu lưu trữ thông tin cho ứng dụng. # # Các điểm nhấn của đồ án cuối kỳ như sau: # # 1. Xây dựng hệ thống rút tin tức và thống kê từ khóa đơn giản nhưng đáp ứng được luồng dữ liệu đến cực lớn và có khả năng mở rộng linh hoạt # 2. Đề xuất sử dụng cơ sở dữ liệu (CSDL) quan hệ và các tiện ích mở rộng để lưu trữ và thống kê dữ liệu thay cho các CSDL phi quan hệ (NoSQL) # 3. Triển khai các dịch vụ trên docker # # ### Hệ thống # Chúng tôi đề xuất sử dụng Postgresql (Postgres), Redis, Kafka cùng một số tiện ích mở rộng của Postgres làm cơ sở xây dựng hệ thống. Hình bên dưới minh họa tóm tắt hệ thống chúng tôi xây dựng. # <img src="images/system.png"> # Hệ thống trên gồm hai thành phần chính. ```www``` có nhiệm vụ thu thập các url từ những trang web cho trước. Các url được chuẩn hóa và lưu trữ dưới dạng mã hóa sha1 trong Redis một bản duy nhất (với key là mã băm của url). Với những url mới, ```www``` đưa chúng vào Kafka và chờ chương trình rút nội dung, rút thực thể xử lý. # # Vì các bài báo mới không nhiều nên chúng tôi đưa chúng trực tiếp vào Postgres. Các keywords xuất hiện nhiều nên chúng tôi đưa chúng vào Kafka và dùng tiện ích mở rộng khác của Postgres là PipelineDB + PipelineKafka để đồng bộ số dữ liệu này. Các từ khóa sẽ được thống kê thông qua khung nhìn liên tục của PipelineDB được viết giống khung nhìn của SQL cơ bản. # # Để phân tán dữ liệu, chúng tôi sử dụng Citusdata với master và các worker node. Bảng lưu trữ dữ liệu sẽ được tạo thành bảng phân tán thông qua lệnh ```create_distributed_table``` của Citus. Vì phần cứng hạn chế nên chúng tôi chỉ thực hiện khởi tạo 1 master node và 3 worker tương ứng với master và slave 1 - 3. # # Chúng tôi thực hiện cài đặt các phần mềm cơ sở dữ liệu trực tiếp trên các slave master và slave 1 - 3. 
Các dịch vụ, mã nguồn khác được chạy độc lập trên container ở slave 4 # ### Cài đặt # #### Cài đặt docker và docker-compose trên slave 4 # Cài đặt các gói cần thiết # # ```sudo apt install apt-transport-https ca-certificates curl software-properties-common``` # # Tiếp theo, cập nhật key của docker vào repository của ubuntu # # ```curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -``` # # Thêm vào apt source list # # ```sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"``` # # Cập nhật repository # # ```sudo apt-get update``` # # Cài đặt docker community edition # # ```sudo apt install -y docker-ce``` # ##### Cài đặt Postgresql tại master và các slave còn lại # Thêm key postgresql # # ```wget -q https://www.postgresql.org/media/keys/ACCC4CF8.asc -O- | sudo apt-key add -``` # # Thêm vào source list # # ```echo "deb http://apt.postgresql.org/pub/repos/apt/ bionic-pgdg main" | sudo tee /etc/apt/sources.list.d/postgresql.list``` # # Cập nhật repository # # ```sudo apt-get update``` # # Cài đặt postges 11 # # ```sudo apt-get install -y postgresql-11``` # #### Citusdata tại master và các slave còn lại # Thêm vào repository # # ```curl https://install.citusdata.com/community/deb.sh | sudo bash``` # # Cài Citus # # ```sudo apt-get -y install postgresql-11-citus``` # # Sau khi cài đặt xong Postgres và Citus ở các máy, tiếp tục thực hiện mở địa chỉ để các node Postgres giao tiếp với nhau. Thực hiện các bước sau trên tất cả các máy này: # # 1. Chỉnh listen # # ```sudo vim /etc/postgresql/11/main/postgresql.conf``` # # tìm dòng ```listen_addresses='127.0.0.1``` và thay bằng ```listen_addresses='*'``` # # 2. Tiếp theo thêm extension bằng cách thêm dòng # # ```shared_preload_libraries = 'citus'``` # # 3. Lưu và tiếp tục sửa # # ```sudo vim /etc/postgresql/11/main/pg_hba.conf``` # # thay # # ```host all all 127.0.0.1/32 md5``` # # thành # # ```host all all 0.0.0.0/0 trust``` # # # 4. 
Lưu và chạy lệnh khởi động lại pg server # # ```service postgresql restart``` # # ** Kiểm tra xem pg còn hoạt động hay không # # # ```service postgresql status``` # # và cluster # # ```pg_lsclusters``` # # #### Citusdata tại master node # Chọn csdl và tạo extension bằng lệnh # # ```CREATE EXTENSION citus;``` # # Kết nối với các slave bằng lệnh # # ```SELECT * from master_add_node(SLAVE_IP_ADDRESS_OR_NAME, BINDING_PORT);``` # # ví dụ: # # ```SELECT * from master_add_node('slave-1', 5432);``` # #### Cài PipelineDB tại master # # Thêm apt repository # # ```curl -s http://download.pipelinedb.com/apt.sh | sudo bash``` # # Cài đặt # # ```sudo apt-get install pipelinedb-postgresql-11``` # # Chỉnh preload của pg trong tập tin ```sudo vim /etc/postgresql/11/main/postgresql.conf``` # # ```shared_preload_libraries = 'citus,pipelinedb'``` # # và # # ```max_worker_processes = 128``` # # khởi động lại Postgres server bằng lệnh ```service postgresql restart``` # #### Cài kafka-pipeline # # Thực hiện lần lượt các lệnh sau: # # ```sudo apt-get install git gcc g++ zlib1g-dev``` # # ```git clone https://github.com/edenhill/librdkafka.git``` # # ```git clone https://github.com/pipelinedb/pipeline_kafka.git``` # # ```cd librdkafka``` # # ```./configure --prefix=/usr ``` # # ```make && sudo make install``` # # # ```cd pipeline_kafka``` # # ```./configure ``` # # ```make && sudo make install``` # # Cuối cùng, chỉnh preload trong postgresql.conf và khởi động lại server # # ```shared_preload_libraries = 'citus,pipelinedb,kafka-pipeline'``` # #### Khởi động các container app tại slave 4 # Di chuyển đến thư mục project và gõ lệnh: # # ```sudo docker-compose up -d``` # ### Chi tiết hiện thực # Chúng tôi hiện thực chương trình bằng python3.6 được chạy trên ubuntu 18.04. Chúng tôi thực hiện kết nối với Postgresql bằng ```sqlalchemy```, thực hiện các thao tác với Kafka bằng ```kafka-python```. # # Các bài báo được rút từ 150 nguồn tiếng Anh (xem ```pages.txt```). 
Chúng tôi thực hiện rút tiêu đề, tóm tắt, nội dung, ngày đăng, tác giả từ bài báo thông qua thư viện ```newspaper3k```. Từ tiêu đề và nội dung, chúng tôi tiếp tục thực hiện rút các thực thể, hay còn gọi là từ khóa bằng thư viện ```nltk```. Các từ khóa sẽ được thống kê số lượng xuất hiện mỗi ngày bằng khung nhìn liên tục của PipelineDB từ bảng stream. Khung nhìn liên tục được viết bằng lệnh sql: # # # ```CREATE VIEW keyword_stats WITH (action=materialize) AS # SELECT data.keyword, DATE(data.created_date) AS date, COUNT(DATE(data.created_date)) AS total # FROM public.data # GROUP BY data.keyword, DATE(data.created_date) # ``` # Mã nguồn đồ án nằm trong thư mục ```src``` gồm các tập tin: # # 1. ```arguments.py``` chứa các tham số khi chạy chương trình như sau: # # ```--kafka_host``` địa chỉ host và port để kết nối đến kafka server. Ví dụ: 127.0.0.1:9092 # # ```--kafka_link_topic``` topic chứa các liên kết, dùng như hàng đợi chuẩn bị cho rút nội dung # # ```--kafka_keyword_topic``` topic chứa các keyword sau khi đã được rút nội dung. PipelineDB sẽ liên kết đến topic này và tự động stream dữ liệu và bảng chuẩn bị cho thống kê # # ```--kafka_default_group``` nhóm làm việc của kafka consumer, dùng để theo dõi những offset đã duyệt qua để tiếp tục chạy phòng trường hợp consumer bị ngắt # # ```--redis_host``` địa chỉ host của redis database # # ```--redis_port``` port redis # # ```--redis_db``` số thứ tự của redis database. Mặc định ở đây là $1$ # # ```--redis_password``` mật khẩu của redis server # # ```--pg_host``` địa chỉ host của postgresql # # ```--pg_port``` port postgres # # ```--pg_user``` user được quyền truy cập vào database # # ```--pg_password``` password của user # # ```--pg_db``` database cần được truy cập # # ```--pg_relation``` tên bảng chứa dữ liệu tin được rút về # # # # 2. ```helper.py``` hiện thực các phương thức như kết nối database, database model, rút thực thể (keyword) từ text, mã hóa url, chuẩn hóa url # # 3.
```logger.py``` ghi log của chương trình # 4. ```visitor.py``` lấy các link nội dung từ url cho trước và gởi vào Kafka # 5. ```scraper.py``` rút nội dung từ các link trong Kafka topic. Sau khi thực hiện xong, nội dung bài báo sẽ được gởi thẳng vào Postgresql. Các keywords được gởi vào PipelineDB thông qua Kafka # *** Ví dụ chạy xem trong tập tin ```runvisitor.sh``` và ```runscraper.sh``` # ### Kết quả # ### Tham khảo # Cài đặt docker: https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-18-04 # # Docker Kafka và Zookeeper: https://github.com/wurstmeister/kafka-docker # # Docker Redis: https://hub.docker.com/_/redis # # Cài đặt Postgres 11: https://www.itzgeek.com/how-tos/linux/ubuntu-how-tos/how-to-install-postgresql-10-on-ubuntu-18-04-lts.html # # Citus data: http://docs.citusdata.com/ # # PipelineDB: http://docs.pipelinedb.com/ # # #
report.ipynb
# ## Creating and loading arrays # ### Creating arrays import numpy as np print("ones", np.ones(5)) print("arange", np.arange(5)) print("linspace", np.linspace(0., 1., 5)) print("random", np.random.uniform(size=3)) print("custom", np.array([2, 3, 5])) np.array([[1, 2], [3, 4]]) np.ones(5, dtype=np.int64) np.arange(5).astype(np.float64) # ### Loading arrays from files import pandas as pd data = pd.read_csv('../chapter2/data/nyc_data.csv') pickup = data[['pickup_longitude', 'pickup_latitude']].values pickup pickup.shape
Section 3/32-creating.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Chapter 1: Introduction # *** # # ### §1.1 What is Python # # Python is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant indentation. # # # - An interpreted high-level programming language # - Emphasizes code readability # - With tons of data science libraries # # #### What can python do? # - Software and web application development ([Django](https://www.djangoproject.com/), [Flask](https://palletsprojects.com/p/flask/)...) # - Data processing, scientific computing ([Pandas](https://pandas.pydata.org/),[scipy](https://www.scipy.org/)...), # - Machine learning, artificial intelligence ([scikit-learn](https://scikit-learn.org/stable/),[PySpark](https://spark.apache.org/docs/latest/api/python/index.html),[PyTorch](https://pytorch.org/)...) # - System scripting, robotic processing # - Blockchain ([Ethereum Development](https://ethereum.org/en/developers/docs/programming-languages/python/)...) # - and many more... # # #### Why do we use Python # - Easy to read # - Easy to write # - Easy to learn # ### §1.2 Install Python3 # #### Windows # Download Python installer from official website according to your Windows version (32-bit or 64-bit). # Run the downloaded installer program, remember to select `Add Python 3.x to PATH` before clicking `Install Now`. # # To run Python3 REPL (interactive program, REPL stands for Read-Evaluate-Print-Loop), type `python` in Command Prompt. # If you see error message `'python' is not recognized as an internal or external command`, then add `the path to python.exe` to `environment variables`. # # > Type `exit()` to quit Python REPL. # # #### MacOS # The pre-installed Python version is 2.7.
There are 2 ways to install Python3: # Method 1: # Download installation package from Python official website: # [https://www.python.org/downloads/](https://www.python.org/downloads/) # # Method 2: # Run command `brew install python3` if Homebrew is installed. # # To run Python3 REPL, type `python3` in Terminal application. # ### §1.3 Jupyter Notebook # #### Install Jupyter Notebook through Anaconda # [https://www.anaconda.com/download](https://www.anaconda.com/download) # # > Note: Python and pip (Python package manager) are included in Anaconda. After the installation, the default python path is modified by Anaconda. # # #### Jupyter Notebook Shortcuts # **Edit mode**: text area is focused, left border is **<span style="color:#66BB6A">green</span>** # **Command mode**: text area is out of focus, left border is **<span style="color:#42A5F5">blue</span>** # <h4><center>Keyboard shortcut</center></h4> # # | Key| Description|<th>Mode</th>| # |---|---|---| # | `ctrl` + `enter` | run cell |<td rowspan=10>Command</td>| # | `a` | insert cell above | # | `b` | insert cell below | # | `c` | copy the cell selected | # | `v` | paste cell | # | `d`, `d` | selete the selected cell | # | `shift` + `m` | merge selected cells | # | `y` | change cell to Code | # | `m` | change cell to Markdown | # | `h` | view all keyboard shortcuts | # | `cmd/ctrl` + `click` | multi-cursor editing |<td rowspan=3>Edit</td>| # | `cmd/ctrl` + `/` | toggle command line | # | `ctrl` + `shift` + `-` | split cell | # |`cmd` + `shift` + `p` | search commands |<td>Any mode</td>| # # Press `Esc` to exit `Edit mode` and enter `Command mode`. Use up/down arrow keys to select different lines. # Press `Enter` to enter the `Edit mode` of the selected line. # # > Tips: # to clean a cell's output, # select the target cell, then press `Esc`, `r`, `y` in sequence.
notebook/Chapter1_A Brief Introduction to Python.ipynb