code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""KNN classification of federal contract set-aside types.

Loads contract records from a local PostgreSQL ``usaspendingdb`` database,
derives a numeric set-aside class label and a single ``contract_value``
feature, one-hot encodes the categorical columns, and evaluates a
k-nearest-neighbours classifier (hold-out metrics plus 12-fold CV accuracy).
"""

import pandas as pd
import numpy as np  # noqa: F401  -- kept: the original notebook imported it

pd.options.display.max_columns = 300

# Columns pulled from the consolidated table (kept as one SQL fragment).
SQL_COLS = ('federal_action_obligation, '
            'base_and_exercised_options_value, '
            'base_and_all_options_value, '
            'awarding_sub_agency_name, '
            'awarding_office_name, '
            'funding_sub_agency_name, '
            'funding_office_name, '
            'primary_place_of_performance_state_code, '
            'award_or_idv_flag, '
            'award_type, '
            'type_of_contract_pricing, '
            'dod_claimant_program_description, '
            'type_of_set_aside_code, '
            'contract_bundling, '
            'national_interest_action, '
            'gfe_gfp, '
            'contract_financing, '
            'portfolio_group, '
            'product_or_service_code_description, '
            'naics_bucket_title, '
            'naics_description')

# Mapping from set-aside code to the numeric class label used as the target.
SET_ASIDE_NUMBER = {
    'NONE': 1, 'SBA': 2, 'WOSB': 3, '8A': 4, '8AN': 5, 'SDVOSBC': 6,
    'HZC': 7, 'SBP': 8, 'SDVOSBS': 9, 'EDWOSB': 10, 'WOSBSS': 11,
    'HZS': 12, 'ISBEE': 13,
}


def load_data():
    """Read the filtered/bucketed contract table into a DataFrame.

    psycopg2 is imported lazily so the pure helpers in this module can be
    imported without the database driver installed.
    """
    import psycopg2

    # NOTE(review): credentials are hard-coded; read them from the
    # environment or a config file instead of committing a password.
    conn = psycopg2.connect(database='usaspendingdb', user='postgres',
                            password='<PASSWORD>', host='127.0.0.1',
                            port='5432')
    try:
        return pd.read_sql_query(
            'SELECT ' + SQL_COLS + ' FROM consolidated_data_filtered_bucketed',
            con=conn)
    finally:
        # The original script never closed the connection (leak).
        conn.close()


def contract_value(c):
    """Return the best available dollar value for one contract row.

    Preference order: exercised-options value, then all-options value,
    then the obligated amount; 0 when none of them is positive (this also
    covers NaNs, since ``NaN > 0`` is False).
    """
    if c['base_and_exercised_options_value'] > 0:
        return c['base_and_exercised_options_value']
    elif c['base_and_all_options_value'] > 0:
        return c['base_and_all_options_value']
    elif c['federal_action_obligation'] > 0:
        return c['federal_action_obligation']
    return 0


def prepare_features(df):
    """Label rows, derive ``contract_value``, one-hot encode.

    Returns (X, y): the dummy-encoded feature matrix and the numeric
    set-aside label series.
    """
    # Rows without a set-aside code cannot be labelled -- drop them.
    df = df[df['type_of_set_aside_code'].notna()]
    df = df.assign(
        set_aside_number=df['type_of_set_aside_code'].map(SET_ASIDE_NUMBER),
        contract_value=df.apply(contract_value, axis=1),
    )
    # The raw value columns are superseded by `contract_value`; the code
    # column is superseded by the numeric label.
    df = df.drop(['type_of_set_aside_code',
                  'base_and_exercised_options_value',
                  'base_and_all_options_value',
                  'federal_action_obligation'], axis=1)
    df3 = pd.get_dummies(df.dropna())
    return df3.drop(['set_aside_number'], axis=1), df3['set_aside_number']


def train_and_evaluate(X, y):
    """Fit a 13-NN classifier; print hold-out metrics and 12-fold CV accuracy.

    sklearn is imported lazily for the same reason as psycopg2 above.
    (The original imported classification_report/confusion_matrix twice.)
    """
    from sklearn.model_selection import train_test_split, cross_val_score
    from sklearn.metrics import confusion_matrix, classification_report
    from sklearn.neighbors import KNeighborsClassifier

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.20, random_state=42)
    model = KNeighborsClassifier(n_neighbors=13)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(confusion_matrix(y_test, predictions))
    print(classification_report(y_test, predictions))
    score = cross_val_score(estimator=model, X=X, y=y, cv=12)
    print('Accuracy : ', score.mean())


if __name__ == '__main__':
    X, y = prepare_features(load_data())
    train_and_evaluate(X, y)
KNN/.ipynb_checkpoints/KNN_classification_All_Set_Aside-checkpoint.ipynb
# ============================================================================
# NOTE(review): this is a jupytext "light" export of a 911-calls (CFS)
# analysis notebook, flattened so that the original line breaks fall
# mid-statement. The code below is kept byte-for-byte; only this review
# header was added.
#
# What the notebook does (as far as visible here):
#   * builds city objects (NewOrleans, Dallas, Seattle, Detroit, Charleston)
#     from project-local `src.cities` modules and cleans their call data;
#   * `summary_*` helpers pivot the cleaned data into per-year frequency /
#     percentage tables, and the "Generate reports" cells write them to
#     Excel workbooks under ../../reports/;
#   * the `generate_*_maps` functions render per-tract choropleths (calls
#     per capita, officer-initiated fraction, enforcement actions, median
#     response time) into reports/VeraExport/Maps;
#   * the `*_by_CFS` chart functions and the quantile-breakdown helpers plot
#     call-type mixes against tract demographics; the final cells compute
#     demographic correlation coefficients and explore raw Charleston data.
#
# Review findings (left unfixed here to keep the code byte-identical):
#   * BUG: full_summary_by_type_disposition(city) ignores its `city`
#     argument and always summarises `new_orleans`, so every sheet written
#     to CityCallTypeDisposition.xlsx contains New Orleans data.
#   * map_call_volume(...) ignores its `norm_by` parameter (hard-codes
#     'capita'), and `ax.legend(title_fontsizve=...)` is a typo'd keyword --
#     presumably `title_fontsize` was meant; confirm it doesn't raise.
#   * generate_per_capita_maps / generate_self_initiated_fraction_maps /
#     generate_enforcement_action_maps / generate_response_time_maps each
#     contain two near-identical `for year in city.USE_YEARS` loops (the
#     first titles its figures 'All Years' while saving per-year files),
#     and the per-year-per-CFS loops save to '...-all_years-<cfs>.png', so
#     each successive year overwrites the same files.
#   * After defining generate_response_time_by_CFS(), the cell calls
#     generate_disposition_by_CFS() a second time -- the response-time
#     charts are never generated; inside generate_response_time_by_CFS the
#     per-year-per-CFS loop also omits call_type= in the
#     plot_response_time_dist call while titling/saving with `cfs`.
#   * call_type_breakdown_by_quantile* use bare `except:` clauses, so any
#     failure (not just "no data") is silently rendered as 'No data'.
#   * BUG: the demographics-comparison loop calls
#     plot_demographic_breakdown_summary(city, demo, out_dir[, year=year])
#     but the function signature is (city, variable, year=None) -- the
#     out_dir positional lands in `year` (and the keyword form passes too
#     many arguments).
#   * The column label 'pecent_self_initaited' (sic) is a typo, but it is
#     also the emitted report header, so renaming would change output.
#   * all_data.append(...) uses DataFrame.append, removed in pandas 2.0.
# ============================================================================
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !conda install python-slugify # + # %load_ext autoreload # %autoreload 2 import sys sys.path.append('/home/vera0519/vera_911') import pandas as pd # import cenpy from slugify import slugify from pathlib import Path import src.features.call_types as call_types from src.cities.new_orleans import NewOrleans from src.cities.seattle import Seattle from src.cities.dallas import Dallas from src.cities.detroit import Detroit from src.cities.charleston import Charleston import matplotlib.pyplot as plt import src.features.geo as Geo from src.features.call_types import load_call_mappings, assign_disposition, process import src.visualization.visualize as vis # - 2 new_orleans = NewOrleans() dallas = Dallas() seattle = Seattle() detroit = Detroit() charleston = Charleston() # new_orleans.process_data() # dallas.process_data() # detroit.process_data() charleston.process_data() # seattle.process_data() # + # seattle.process_data() # - new_orleans.clean_data(reload=False) dallas.clean_data(reload=False) detroit.clean_data(reload=False) charleston.clean_data(reload=False) seattle.clean_data(reload=False); def summary_office_initiated(city): return (city.clean_data() .groupby(['year','self_initiated']) .count() .reset_index() .pivot_table(index='year', columns='self_initiated', values='day_of_week') .assign(total=lambda x: x.sum(axis=1)) .assign(percent_other = lambda x: 100*x.No/x.total, pecent_self_initaited = lambda x: 100*x.Yes/x.total) .rename(columns={'No' : 'other', 'Yes': 'self_initiated'})) # + def summary_by_type(city): return (city.clean_data() .loc[lambda x: x.year.isin([2014,2015,2016,2018,2019])] .groupby(['year','self_initiated']) .count() .reset_index() .pivot_table(index='year', columns='self_initiated', values='day_of_week') .assign(total=lambda 
x: x.sum(axis=1)) .assign(percent_other = lambda x: 100*x.No/x.total, pecent_self_initaited = lambda x: 100*x.Yes/x.total) .rename(columns={'No' : 'other', 'Yes': 'self_initiated'})) # .assign(percent_other = lambda x: 100*x.No/x.total, # pecent_self_initaited = lambda x: 100*x.Yes/x.total) # .rename(columns={'No' : 'other', 'Yes': 'self_initiated'})) # - def summary_by_type_disposition(city, percentage=False): return (city.clean_data() .loc[lambda x: x.year.isin([2014,2015,2016,2018,2019])] .rename(columns={'year': 'Year', 'call_type': 'Incident Type','disposition':"Disposition"}) .groupby(['Year','Incident Type','Disposition']) .count()[['index']] .rename(columns= {'index' : 'Frequency'}) .reset_index() .pivot_table(index=['Year','Incident Type'], columns='Disposition', values='Frequency') .fillna(0) .pipe(lambda x: 100* x.div(x.sum(axis=1),axis =0 ) if percentage else x) .rename(columns = lambda x: "Percent {}".format(x) if percentage else "Frequency {}".format(x)) ) def full_summary_by_type_disposition(city): return pd.merge( summary_by_type_disposition(new_orleans, percentage=False), summary_by_type_disposition(new_orleans, percentage=True), left_index=True, right_index=True ) def summary_of_priority(city, percentage = False): return (city.clean_data() .rename(columns= {'year': 'Year', 'priority': 'Priority'}) .groupby(['Year','Priority']) .count() [['index']] .rename(columns={'index': 'Frequency'}) .reset_index() .pivot_table(index='Year', columns='Priority',values='Frequency') .pipe(lambda x: 100* x.div(x.sum(axis=1),axis =0 ) if percentage else x) .fillna(0) .rename(columns = lambda x: "Percent {}".format(x) if percentage else "Frequency {}".format(x)) ) # ## Generate reports # City Priorty Breakdown with pd.ExcelWriter('../../reports/CityPriorityBreakdown.xlsx') as writer: summary_of_priority(new_orleans).to_excel(writer,sheet_name='New Orleans') # summary_of_priority(dallas).to_excel(writer,sheet_name='Dallas') 
summary_of_priority(detroit).to_excel(writer,sheet_name='Detroit') summary_of_priority(seattle).to_excel(writer,sheet_name='Seattle') # summary_of_priority(charleston).to_excel(writer,sheet_name='Charleston') # City Call Type Disposition # + with pd.ExcelWriter('../../reports/CityCallTypeDisposition.xlsx') as writer: full_summary_by_type_disposition(new_orleans).to_excel(writer,sheet_name='New Orleans') full_summary_by_type_disposition(dallas).to_excel(writer,sheet_name='Dallas') full_summary_by_type_disposition(detroit).to_excel(writer,sheet_name='Detroit') full_summary_by_type_disposition(charleston).to_excel(writer,sheet_name='Charleston') full_summary_by_type_disposition(seattle).to_excel(writer, sheet_name='Seattle') # for worksheet in writer.sheets: # for i in range(10): # worksheet.set_column(i,i,15) # - # City Officer Initiated Summary with pd.ExcelWriter('../../reports/CityOfficerInitiatedSummary.xlsx') as writer: summary_office_initiated(new_orleans).to_excel(writer,sheet_name='New Orleans', merge_cells=False) # summary_office_initiated(dallas).to_excel(writer,sheet_name='Dallas') summary_office_initiated(detroit).to_excel(writer,sheet_name='Detroit', merge_cells=False) summary_office_initiated(seattle).to_excel(writer,sheet_name='Seattle') # summary_office_initiated(charleston).to_excel(writer,sheet_name='Charleston') # Call Type summary with pd.ExcelWriter('../../reports/CityCallTypeSummary.xlsx') as writer: summary_by_type(new_orleans).to_excel(writer,sheet_name='New Orleans') # summary_by_type(dallas).to_excel(writer,sheet_name='Dallas') summary_by_type(detroit).to_excel(writer,sheet_name='Detroit') summary_by_type(seattle).to_excel(writer,sheet_name='Seattle') # summary_by_type(charleston).to_excel(writer,sheet_name='Charleston') # ## Generate Plots import matplotlib.pyplot as plt # %matplotlib inline import geopandas as gp from mpl_toolkits.axes_grid1 import make_axes_locatable # + map_crs= {'init':'epsg:3857'} from matplotlib.font_manager import 
FontProperties fontP = FontProperties() fontP.set_size('xx-large') def map_call_volume(city,norm_by=None, ax=None, year=None, call_type=None,scheme=None, vrange=None): if(not ax): fig = plt.figure() ax = fig.add_subplot(111) data = city.call_volume_by_tract(norm_by='capita',year=year, call_type=call_type) if(vrange): data.to_crs(map_crs).plot(column='calls', legend=True,ax=ax, scheme=scheme, vmin=vrange[0],vmax=vrange[1], legend_kwds={ 'loc': "lower center"}) else: data.to_crs(map_crs).plot(column='calls', legend=True,ax=ax, scheme=scheme,legend_kwds={ 'loc': "best", "bbox_to_anchor":(1.35,1),'fontsize':'xx-large','title':'Calls per capita'} ) ax.set_title('Calls per capita') ax.legend(title_fontsizve='xx-large') ax.set_axis_off() return ax # + city = detroit tracts = city.load_tracts().to_crs(vis.map_crs) fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111) map_call_volume(city, norm_by='capita', ax=ax, scheme='fisher_jenks') leg_kwds_dict={'fontsize':'xx-large'} tracts.plot(color='none', edgecolor='white', ax=ax, linewidth=0.2, legend_kwds=leg_kwds_dict) ax.set_title("") plt.tight_layout() leg = ax.get_legend() leg.set_title('Calls per capita',prop = fontP ) # + BASE_MAP_DIR = Path('/home/vera0519/vera_911/reports/VeraExport/Maps') def generate_per_capita_maps(geometry='tract'): plot_dir = BASE_MAP_DIR / "call_volumne_per_capita" plot_dir.mkdir(exist_ok=True) figsize = (10,10) for city in [new_orleans,dallas,detroit,charleston]: tracts = city.load_tracts().to_crs(vis.map_crs) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles') tracts.plot(color='none', edgecolor='red', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'calls_per_capita')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',year=year) tracts.plot(facecolor='none', 
edgecolor='black', ax=ax) ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, 'All Years', 'All CFS')) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'calls_per_capita',year)) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',year=year) ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, year, 'All CFS')) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'calls_per_capita',year)) for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',call_type=cfs) ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, 'All Years', cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'calls_per_capita',slugify(cfs))) for year in city.USE_YEARS: for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',year=year,call_type=cfs) ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, year, cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME,'calls_per_capita',slugify(cfs))) # def generate_disposition_maps(geometry='tract'): # vis.plot_disposition_counts # def generate_response_time_maps(geometry='tract'): # vis.map_median_response_time # def generate_demographic_maps(geometry='tract'): # vis.demgraphics() generate_per_capita_maps(); # - def generate_self_initiated_fraction_maps(geometry='tract'): plot_dir = BASE_MAP_DIR / "officer_initiated" 
plot_dir.mkdir(exist_ok=True) figsize = (10,10) for city in [new_orleans,dallas,detroit,charleston]: tracts = city.load_tracts().to_crs(vis.map_crs) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_self_initiated(city, norm_by='total', ax=ax) tracts.plot(color='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'officer_initiated_fraction')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_self_initiated(city, norm_by='total', ax=ax, year=year) tracts.plot(facecolor='none', edgecolor='black', ax=ax) ax.set_title('{} / officer initiated fraction / {} / {}'.format(city.BASE_NAME, 'All Years', 'All CFS')) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'officer_initiated_fraction',year)) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_self_initiated(city, norm_by='total', ax=ax,year=year) ax.set_title('{} / officer initiated fraction / {} / {}'.format(city.BASE_NAME, year, 'All CFS')) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'officer_initiated_fraction',year)) for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_self_initiated(city, norm_by='total', ax=ax,call_type=cfs) ax.set_title('{} / officer initiated fraction / {} / {}'.format(city.BASE_NAME, 'All Years', cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'officer_initiated_fraction',slugify(cfs))) for year in city.USE_YEARS: for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_self_initiated(city, norm_by='total', ax=ax,year=year,call_type=cfs) ax.set_title('{} / officer 
initiated fraction / {} / {}'.format(city.BASE_NAME, year, cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME,'officer_initiated_fraction',slugify(cfs))) generate_self_initiated_fraction_maps(); def generate_enforcement_action_maps(geometry='tract'): plot_dir = BASE_MAP_DIR / "enforcement_action" plot_dir.mkdir(exist_ok=True) figsize = (10,10) for city in [new_orleans,dallas,detroit,charleston]: tracts = city.load_tracts().to_crs(vis.map_crs) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_enforcement_by_tract(city, ax=ax) tracts.plot(color='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'enforcement_action')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_enforcement_by_tract(city, ax=ax, year=year) tracts.plot(facecolor='none', edgecolor='black', ax=ax) ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, 'All Years', 'All CFS')) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'enforcement_action',year)) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_enforcement_by_tract(city, ax=ax,year=year) ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, year, 'All CFS')) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'enforcement_action',year)) for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_enforcement_by_tract(city, ax=ax,call_type=cfs) ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, 'All Years', cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 
'enforcement_action',slugify(cfs))) for year in city.USE_YEARS: for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_enforcement_by_tract(city, ax=ax,year=year,call_type=cfs) ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, year, cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME,'enforcement_action',slugify(cfs))) generate_enforcement_action_maps(); def generate_response_time_maps(geometry='tract'): plot_dir = BASE_MAP_DIR / "median_response_time" plot_dir.mkdir(exist_ok=True) figsize = (10,10) for city in [new_orleans,dallas,detroit,charleston]: tracts = city.load_tracts().to_crs(vis.map_crs) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_median_response_time(city, ax=ax) tracts.plot(color='none', edgecolor='black', ax=ax, scheme='percentiles') fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'median_response_time')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_median_response_time(city, ax=ax, year=year, scheme='percentiles') tracts.plot(facecolor='none', edgecolor='black', ax=ax) ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, 'All Years', 'All CFS')) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'median_response_time',year)) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_median_response_time(city, ax=ax,year=year, scheme='percentiles') ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, year, 'All CFS')) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'median_response_time',year)) for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = 
plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_median_response_time(city, ax=ax,call_type=cfs, scheme='percentiles') ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, 'All Years', cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'median_response_time',slugify(cfs))) for year in city.USE_YEARS: for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.map_median_response_time(city, ax=ax,year=year,call_type=cfs, scheme='percentiles') ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, year, cfs)) tracts.plot(facecolor='none', edgecolor='black', ax=ax) fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME,'median_response_time',slugify(cfs))) generate_response_time_maps(); # + BASE_CHARTS_DIR = Path('/home/vera0519/vera_911/reports/VeraExport/Charts') def generate_CFS_breakdown(): plot_dir = BASE_CHARTS_DIR / 'CFS_breakdown' plot_dir.mkdir(exist_ok=True) figsize=(10,10) for city in [new_orleans,dallas,detroit,charleston,seattle]: print('Doint city {}'.format(city.BASE_NAME)) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_cfs_breakdown(city) fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'cfs_breakdown')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_cfs_breakdown(city,year=year) fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'cfs_breakdown',year)) # generate_CFS_breakdown() # def generate_call_type_by_CFS(): # vis.plot_self_initated_by_call_type # def generate_response_time(): # plot_response_time_dist # vis.plot_disposition_fraction_by_call_Type(city) # def generate_top_bottom_breakdown_type_disposition(): # + def generate_disposition_by_CFS(): plot_dir = BASE_CHARTS_DIR / 'disposition_by_CFS' plot_dir.mkdir(exist_ok=True) 
figsize=(15,10) for city in [new_orleans,dallas,detroit,charleston,seattle]: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_disposition_fraction_by_call_Type(city, ax=ax) ax.set_xlim(0,1) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ax.set_title('{} - {}'.format(city.BASE_NAME,'All Years')) plt.tight_layout() fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'disposition_by_CFS')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_disposition_fraction_by_call_Type(city, ax=ax, year=year) ax.set_xlim(0,1) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ax.set_title('{} - {}'.format(city.BASE_NAME,year)) plt.tight_layout() fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'disposition_by_CFS',year)) generate_disposition_by_CFS() # - def generate_response_time_by_CFS(): plot_dir = BASE_CHARTS_DIR / 'response_time' plot_dir.mkdir(exist_ok=True) figsize=(15,10) for city in [new_orleans,dallas,detroit,charleston,seattle]: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_response_time_dist(city, ax=ax) ax.set_title('{} - {}'.format(city.BASE_NAME,'All Years')) fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'response_time')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_response_time_dist(city, ax=ax, year=year) ax.set_title('{} - {} - {} - {}'.format(city.BASE_NAME,'response_time',year, 'All CFS')) fig.savefig(plot_dir / '{}-{}-{}-{}.png'.format(city.BASE_NAME, 'response_time',year,'All CFS')) for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_response_time_dist(city, ax=ax, call_type=cfs) ax.set_title('{} - {} - {} - {}'.format(city.BASE_NAME,'response_time','All Years', cfs)) fig.savefig(plot_dir / '{}-{}-{}-{}.png'.format(city.BASE_NAME, 'response_time','all_years',slugify(cfs))) for year in 
city.USE_YEARS: for cfs in city.clean_data().call_type.unique(): if(cfs != None): fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_response_time_dist(city, ax=ax, year=year) ax.set_title('{} - {} - {} - {}'.format(city.BASE_NAME,'response_time',year, cfs)) fig.savefig(plot_dir / '{}-{}-{}-{}.png'.format(city.BASE_NAME, 'response_time',year,slugify(cfs))) generate_disposition_by_CFS() # + def self_initiated_by_CFS(): plot_dir = BASE_CHARTS_DIR / 'officer_initiated_by_CFS' plot_dir.mkdir(exist_ok=True) figsize=(15,10) for city in [new_orleans,dallas,detroit,charleston,seattle]: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_self_initated_by_call_type(city, ax=ax) ax.set_xlim(0,1) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ax.set_title('{} - {}'.format(city.BASE_NAME,'All Years')) plt.tight_layout() fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_by_CFS')) for year in city.USE_YEARS: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) vis.plot_self_initated_by_call_type(city, ax=ax, year=year) ax.set_xlim(0,1) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ax.set_title('{} - {}'.format(city.BASE_NAME,year)) plt.tight_layout() fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_by_CFS',year)) self_initiated_by_CFS() # + def call_type_breakdown_by_quantile(data, variable, quantile_range, ax=None, subset=None ): try: agg = (data[ (data[variable] > data[variable].quantile(quantile_range[0])) & (data[variable] < data[variable].quantile(quantile_range[1])) ] .groupby('call_type') .count()[['index']] .assign(total = lambda x : x['index'].sum()) .pipe(lambda x: 100*x.div(x.total,axis=0)) ['index'].sort_values() ) return agg.plot(kind='barh',ax=ax) except: ax.text(0.5,0.5,'No data', horizontalalignment='center', verticalalignment='center') ax.set_axis_off() return ax def call_type_breakdown_by_quantile_percent(data,variable,breakdown, 
quantile_range,ax=None, subset=None): try: ax = (data[ (data[variable] > data[variable].quantile(quantile_range[0])) & (data[variable] < data[variable].quantile(quantile_range[1])) ] .groupby(['call_type', breakdown]) .count()[['index']] .reset_index() .pivot_table(index='call_type', values='index', columns=breakdown) .assign(total = lambda x: x.sum(axis=1)) .sort_values(by='total') .pipe(lambda x: 100*x.div(x.total,axis=0)) .drop('total', axis=1) .plot(kind='barh', stacked=True,ax=ax) # .assign(total = lambda x : x['index'].sum()) # ['index'].sort_values().plot(kind='barh') ) return ax except: ax.text(0.5,0.5,'No data', horizontalalignment='center', verticalalignment='center') ax.set_axis_off() return ax subset = ['Violent Crime','Suspicion','Drugs','Sex Offenses', 'Domestic Violence', 'Property Crime '] def plot_demographic_breakdown_summary(city,variable,year=None): fig, axs= plt.subplots(nrows=3,ncols=2, sharey='row', figsize=(20,15)) data = (city.filter_calls_by(year=year) .pipe(city.assign_demographics)) data = data[data.call_type.isin(subset)] axs= axs.flatten() call_type_breakdown_by_quantile(data, variable, quantile_range=[0,0.1],ax=axs[0], subset=subset) axs[0].set_xlim(0,8.5) axs[0].set_title('Lowest 10% of tracts by median income') axs[0].set_xlabel('') axs[0].set_ylabel('') call_type_breakdown_by_quantile_percent(data, variable, 'self_initiated', [0,0.1],ax=axs[2], subset=subset) axs[2].set_xlim(0,100) axs[2].set_xlabel('% of CFS type by officer initiated') axs[2].set_ylabel('') axs[2].set_title('') axs[2].get_legend().remove() call_type_breakdown_by_quantile_percent(data, variable, 'disposition', [0,0.1],ax=axs[4], subset=subset) axs[4].set_xlim(0,100) axs[4].set_xlabel('% of CFS type by disposition') axs[4].set_ylabel('') axs[4].set_title('') axs[4].get_legend().remove() call_type_breakdown_by_quantile(data, variable, quantile_range=[0.9,1.0],ax=axs[1], subset=subset) axs[1].set_xlim(0,8.5) axs[1].set_title('Highest 10% of tracts by median income') # 
axs[1].set_xlabel('') axs[1].set_xlabel('highest 10% of tracts by median income') max_x = max(axs[1].get_xlim()[1], axs[0].get_xlim()[0]) axs[0].set_xlim(0,max_x) axs[1].set_xlim(0,max_x) call_type_breakdown_by_quantile_percent(data, variable, 'self_initiated', [0.9,1.0],ax=axs[3], subset=subset) axs[3].set_xlim(0,100) axs[3].set_ylabel('') axs[3].legend(loc='upper right',title='',bbox_to_anchor=(1.25,1.0), labels=["Other", 'Officer Initiated']) call_type_breakdown_by_quantile_percent(data, variable, 'disposition', [0.9,1.0],ax=axs[5], subset=subset) axs[5].set_xlim(0,100) axs[5].set_ylabel('') axs[5].legend(loc='upper right', title='',bbox_to_anchor=(1.3,1.0),fontsize='x-large') plt.suptitle("{} - {} - {}".format(city.BASE_NAME, variable,year if year else 'All Years')) plt.tight_layout() # fig.savefig(path / "{}-{}-{}-{}.png".format(city.BASE_NAME,'demographic_quantiles',variable,year if year else 'All Years')) # - import matplotlib # + city= new_orleans variable ='median_income' year=None matplotlib.rc('xtick', labelsize=20) matplotlib.rc('ytick', labelsize=20) font = {'family' : 'DejaVu Sans', 'weight' : 'light', 'size' : 20} matplotlib.rc('font', **font) # fig, axs= plt.subplots(nrows=1,ncols=2, sharey='row', figsize=(20,5)) # data = (city.filter_calls_by(year=year) # .pipe(city.assign_demographics)) # data = data[data.call_type.isin(subset)] # axs= axs.flatten() # call_type_breakdown_by_quantile(data, variable, quantile_range=[0,0.1],ax=axs[0], subset=subset) # axs[0].set_xlim(0,8.5) # axs[0].set_title('Lowest 10% median income', fontdict={'size': 20, 'weight':'normal'}) # axs[0].set_xlabel('% of total calls', fontdict={'weight':'light'}) # axs[0].set_ylabel('') # call_type_breakdown_by_quantile(data, variable, quantile_range=[0.9,1.0],ax=axs[1], subset=subset) # axs[1].set_xlim(0,8.5) # axs[1].set_title('Highest 10% median income', fontdict={'size': 20, 'weight':'normal'}) # # axs[1].set_xlabel('') # axs[1].set_xlabel('% of calls', 
fontdict={'weight':'light'}) # max_x = max(axs[1].get_xlim()[1], axs[0].get_xlim()[0]) # axs[0].set_xlim(0,max_x) # axs[1].set_xlim(0,max_x) # plt.tight_layout() # fig, axs= plt.subplots(nrows=1,ncols=2, sharey='row', figsize=(20,5)) # axs= axs.flatten() # call_type_breakdown_by_quantile_percent(data, variable, 'self_initiated', [0,0.1],ax=axs[0], subset=subset) # axs[0].set_xlim(0,100) # axs[0].set_xlabel('% of calls', fontdict={'weight':'light'}) # axs[0].set_ylabel('') # axs[0].set_title('') # axs[0].get_legend().remove() # call_type_breakdown_by_quantile_percent(data, variable, 'self_initiated', [0.9,1.0],ax=axs[1], subset=subset) # axs[1].set_xlim(0,100) # axs[1].set_xlabel('% of calls', fontdict={'weight':'light'}) # axs[1].set_ylabel('Reason for call',fontdict={'weight':'light'}) # # axs[1].legend(loc='upper right',title='',bbox_to_anchor=(1.25,1.0), labels=["Other", 'Officer Initiated']) # axs[1].legend(loc='upper center', ncol=2, bbox_to_anchor=(0,1.3) ,title='Officer Initiated') # plt.tight_layout() fig, axs= plt.subplots(nrows=1,ncols=2, sharey='row', figsize=(20,5)) axs= axs.flatten() call_type_breakdown_by_quantile_percent(data, variable, 'disposition', [0,0.1],ax=axs[0], subset=subset) axs[0].set_xlim(0,100) axs[0].set_xlabel('% of calls', fontdict={'weight':'light'}) axs[0].set_ylabel('') axs[0].set_title('') axs[0].get_legend().remove() call_type_breakdown_by_quantile_percent(data, variable, 'disposition', [0.9,1.0],ax=axs[1], subset=subset) axs[1].set_xlim(0,100) axs[1].set_xlabel('% of calls',fontdict={'weight':'light'}) axs[1].set_ylabel('') # axs[1].legend(loc='upper right', title='',bbox_to_anchor=(1.25,1.0)) axs[1].legend(loc='upper center', ncol=5, bbox_to_anchor=(-0.004,1.32), title='Outcome of call') # plt.suptitle("{} - {} - {}".format(city.BASE_NAME, variable,year if year else 'All Years')) plt.tight_layout() # - plot_demographic_breakdown_summary(new_orleans,'median_income') 2 for city in [new_orleans,dallas,detroit,charleston]: demos = 
['pc_white','pc_black','pc_occupied_homes','median_income','median_rent','percent_income_spent_on_rent' ] out_dir = BASE_CHARTS_DIR / 'demographics_comparison' out_dir.mkdir(exist_ok=True) for demo in demos: plot_demographic_breakdown_summary(city,demo,out_dir) for year in city.USE_YEARS: plot_demographic_breakdown_summary(new_orleans,demo,out_dir,year=year) import numpy as np import seaborn as sns # + plot_dir = BASE_CHARTS_DIR / 'correlation_coefficients' plot_dir.mkdir(exist_ok=True) def make_tract_metrics(city, call_type=None, year=None): col_volume = (city.call_volume_by_tract(norm_by='capita',call_type=call_type, year=year) .dropna() .drop('geometry',axis=1)).rename(columns={'calls':"calls_per_capita_per_year"}) try: enforcement_fraction = city.disposition_by_tract(norm_by='total', call_type=call_type, year=year)[city.ENFORCEMENT_VARIABLES].sum(axis=1) except: enforcement_fraction = np.nan try: officer_initated_fraction = city.self_initated_by_tract(norm_by='total', call_type=call_type, year=year)['Yes'] except: officer_initated_fraction = np.nan return city.assign_demographics(col_volume.assign(enforcement_fraction = enforcement_fraction, officer_initated_fraction=officer_initated_fraction)) def make_corr_plot_for_city(data, city, cut='All',metrics=['calls_per_capita_per_year', 'enforcement_fraction', 'officer_initated_fraction']): demos = ['pc_asian','pc_black','pc_employed', 'pc_hispanic', 'pc_occupied_homes', 'pc_white','median_income', 'median_rent'] corr_data = data.copy().drop(['state','county','tract','geometry'],axis=1) corr_data = corr_data.dropna(axis=1, how='all') corr_data[corr_data <0 ] = None corr_data = corr_data.dropna(how='any',axis=0) result = [] for metric in metrics: if(metric in corr_data.columns): for demo in demos: result.append( { 'c': corr_data[metric].corr(corr_data[demo]), 'city' : city, 'metric': metric,'demo':demo}) return pd.DataFrame(result) for call_type in new_orleans.clean_data().call_type.unique(): all_data = pd.DataFrame() 
for city in [new_orleans,detroit,dallas,charleston]: tract_data = make_tract_metrics(city, call_type=call_type).set_index('GEOID') all_data = all_data.append(make_corr_plot_for_city(tract_data, city.BASE_NAME )) # g= sns.PairGrid(all_data, x_vars = ['c','metric','city'], y_vars=['demo'], height=10, aspect=.25) plt.figure(figsize=(10,5)) demo_names =['% Asian', '% Black', '% Hispanic', '% White', '% Employed', 'Median Income', 'Median rent', '% Occupied homes'] for index, demo in enumerate(['pc_asian','pc_black','pc_hispanic','pc_white','pc_employed', 'median_income', 'median_rent','pc_occupied_homes']): plt.subplot(2,4,index+1 ) ax = sns.stripplot( data=all_data[all_data.demo == demo], x='c',y='metric', hue='city') ax.set_xlim(-1,1) if(index!=0 and index!=4): ax.set_yticklabels([]) else: ax.set_yticklabels(['Calls per capita per year','Enforcement fraction', 'Officer initiated']) plt.axvline(x=0) if(index == 3): plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) else: ax.get_legend().remove() ax.set_ylabel('') ax.set_xlabel('Correlation coefficent') plt.title(demo_names[index]) plt.tight_layout(rect=[0,0.03,1,0.95]) plt.suptitle(call_type) plt.savefig(plot_dir / slugify(call_type)) # - data = charleston.clean_data() import geopandas as gp ax =gp.read_file("/home/vera0519/vera_911/data/raw/Charleston/tracts.geojson").plot() ax.get_ylim() from shapely.geometry import Point cd[cd['LONGITUDE']<0 ] cd = pd.read_csv('/home/vera0519/vera_911/data/raw/Charleston/calls_2019.csv',encoding='latin' ) ax = gp.read_file("/home/vera0519/vera_911/data/raw/Charleston/tracts.geojson").plot() gcd = gp.GeoDataFrame(cd, geometry= cd.apply(lambda x: Point(x.LONGITUDE, x.LATITUDE),axis=1), crs={"init":"EPSG:4326"}) gcd[gcd.LONGITUDE<0].plot(ax=ax)
notebooks/Data Exploration/FinalOutput.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic Text Classification
# Credits: [TensorFlow](https://www.tensorflow.org/tutorials/keras/text_classification)

# ## Imports

# +
import matplotlib.pyplot as plt
import os
import re
import shutil
import string
import tensorflow as tf

from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras import preprocessing
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# -

# ## Overview

# ### Sentiment Analysis

# In this program we will be performing sentiment analysis on the IMDB dataset.

# ### Dataset

# I'll be utilizing the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/) which contains 50,000 reviews from the [Internet Movie Database](https://www.imdb.com/). This is split into 25,000 for training and 25,000 for testing.

# ## Loading Dataset

# NOTE(review): assumes the aclImdb archive has already been extracted to this
# relative path — confirm before running.
dataset_dir = "../DataSets/aclImdb"
os.listdir(dataset_dir)

train_dir = os.path.join(dataset_dir, 'train')
os.listdir(train_dir)

# The 'unsup' (unlabeled) folder must be removed so text_dataset_from_directory
# only sees the pos/neg class folders; the rmtree was already run once.
remove_dir = os.path.join(train_dir, 'unsup')
# shutil.rmtree(remove_dir) ##Done

# +
batch_size = 32
seed = 42  # fixed seed so the train/validation split is reproducible

raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
    dataset_dir+'/train',
    batch_size=batch_size,
    validation_split=0.2,
    subset='training',
    seed=seed)
# -

# Peek at a few raw reviews with their integer labels.
for text_batch, label_batch in raw_train_ds.take(1):
    for i in range(3):
        print("Review", text_batch.numpy()[i])
        print("Label", label_batch.numpy()[i])

print("Label 0 corresponds to", raw_train_ds.class_names[0])
print("Label 1 corresponds to", raw_train_ds.class_names[1])

# Validation split: same directory, same seed, complementary 20% subset.
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
    dataset_dir+'/train',
    batch_size=batch_size,
    validation_split=0.2,
    subset='validation',
    seed=seed)

raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
    dataset_dir+'/test',
    batch_size=batch_size)

# ## Preprocessing

def custom_standardization(input_data):
    """Lowercase the text, strip literal '<br />' HTML tags, and remove punctuation.

    Runs inside the TF graph (tf.strings ops), so it can be passed as the
    `standardize` callable of a TextVectorization layer.
    """
    lowercase = tf.strings.lower(input_data)
    stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
    return tf.strings.regex_replace(stripped_html,
                                    '[%s]' % re.escape(string.punctuation),
                                    '')

# +
max_features = 10000      # vocabulary size cap
sequence_length = 250     # reviews padded/truncated to this many tokens

vectorize_layer = TextVectorization(
    standardize=custom_standardization,
    max_tokens=max_features,
    output_mode='int',
    output_sequence_length=sequence_length)
# -

# Make a text-only dataset (without labels), then call adapt to build the vocabulary.
train_text = raw_train_ds.map(lambda x, y: x)
vectorize_layer.adapt(train_text)

def vectorize_text(text, label):
    """Map a (text, label) pair to (token-id tensor, label) via vectorize_layer."""
    text = tf.expand_dims(text, -1)  # layer expects a batch dimension
    return vectorize_layer(text), label

# retrieve a batch (of 32 reviews and labels) from the dataset
text_batch, label_batch = next(iter(raw_train_ds))
first_review, first_label = text_batch[0], label_batch[0]
print("Review", first_review)
print("Label", raw_train_ds.class_names[first_label])
print("Vectorized review", vectorize_text(first_review, first_label))

# Look up a couple of token ids back in the vocabulary.
print("1287 ---> ",vectorize_layer.get_vocabulary()[1287])
print(" 313 ---> ",vectorize_layer.get_vocabulary()[313])
print('Vocabulary size: {}'.format(len(vectorize_layer.get_vocabulary())))

train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)

# +
# Cache and prefetch so the input pipeline overlaps with training.
AUTOTUNE = tf.data.experimental.AUTOTUNE

train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
# -

# ## Model

# +
embedding_dim = 16

# Embedding -> mean-pool over the sequence -> single logit (binary sentiment).
model = tf.keras.Sequential([
    layers.Embedding(max_features + 1, embedding_dim),
    layers.Dropout(0.2),
    layers.GlobalAveragePooling1D(),
    layers.Dropout(0.2),
    layers.Dense(1)])

model.summary()
# -

# ## Loss func
NeuralNetworks/Perceptrons/TextClassification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Read the records from the Geom Test of Reciprocity

# ### Step 0
#
# Load packages

# +
#load all packages
import datetime
import pickle
import copy
import os
from sys import argv
from pathlib import Path

import numpy as np
import pandas as pd
import pyvista as pv
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from scipy import signal

from pyaspect.project import *
from pyaspect.model.gridmod3d import gridmod3d as gm
from pyaspect.model.bbox import bbox as bb
from pyaspect.model.gm3d_utils import *
from pyaspect.moment_tensor import MomentTensor
from pyaspect.specfemio.headers import *
from pyaspect.specfemio.write import *
from pyaspect.specfemio.write import _write_header
from pyaspect.specfemio.read import _read_headers
from pyaspect.specfemio.read import *
from pyaspect.specfemio.utils import *

import pyaspect.events.gevents as gevents
import pyaspect.events.gstations as gstations
from pyaspect.events.munge.knmi import correct_station_depths as csd_f
import pyaspect.events.mtensors as mtensors
from obspy.imaging.beachball import beach
from obspy import UTCDateTime
import shapefile as sf
# -

# ## Set Project Paths

data_in_dir = 'data/output/'
data_out_dir = data_in_dir

# !ls {data_out_dir}/tmp/TestProjects/CGFR_Test

projects_fqp = os.path.join(data_out_dir,'tmp','TestProjects','CGFR_Test')
recip_project_fqp = os.path.join(projects_fqp,'ReciprocalGeometricTestProject')
fwd_project_fqp = os.path.join(projects_fqp,'ForwardGeometricTestProject')

# !ls {recip_project_fqp}
print()
# !ls {fwd_project_fqp}

# ## Define bandpass (this is just a secondary test to sos filter) (taken from scipy recipies)

# +
from scipy.signal import butter, lfilter

def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth bandpass filter; returns (b, a) coefficients."""
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    b, a = butter(order, [low, high], btype='band')
    return b, a


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply the bandpass designed by butter_bandpass to a 1-D trace."""
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    y = lfilter(b, a, data)
    return y
# -

# ## Read Reciprocal Project RecordHeader and load data

def _load_data(self,dtype,sl=slice(None,None,None),scale=1.0,rfunc=None):
    """Read binary SPECFEM traces for every station row of *self* (a project
    RecordHeader) and attach them as 'comp_EX'/'comp_NY'/'comp_Z' columns.

    Parameters:
        dtype: only 'b' (binary) is supported.
        sl: slice applied to each trace (used to trim samples).
        scale: multiplicative factor applied to each trace.
        rfunc: unused; kept for signature compatibility with callers.
    """
    if dtype != 'b':
        raise Exception('can only read binary type data for the time being')
    l_data_x = []
    l_data_y = []
    l_data_z = []
    for idx, row in self.stations_df.iterrows():
        fp_prefix = row['data_fqdn']
        fp = os.path.join(projects_fqp,fp_prefix)
        # Match any component file for this station, e.g. '.BXX.semd', '.BXZ.semd'.
        match_fp = fp + '.*X[XYZEN].sem*'
        for filepath in glob.glob(match_fp):
            # Component letter is the last char of the channel code.
            comp = filepath.split('.')[-2][-1]
            if comp == 'X' or comp == 'E':
                l_data_x.append(scale*(np.fromfile(filepath, dtype=np.float32)[sl].astype(np.float64)))
            elif comp == 'Y' or comp == 'N':
                l_data_y.append(scale*(np.fromfile(filepath, dtype=np.float32)[sl].astype(np.float64)))
            elif comp == 'Z':
                l_data_z.append(scale*(np.fromfile(filepath, dtype=np.float32)[sl].astype(np.float64)))
            else:
                raise Exception(f'Could not find component: "{comp}"')
    '''
    for eidx, edf in recip_record_h.stations_df.groupby(level='eid'):
        for sidx, sdf in edf.groupby(level='sid'):
            for tidx, tdf in sdf.groupby(level='trid'):
                for gidx, tdf in tdf.groupby(level='gid'):
    '''
    # Assumes glob returns rows in the same order as stations_df iteration —
    # TODO(review): confirm; a mismatch would silently misassign traces.
    self.stations_df['comp_EX'] = l_data_x
    self.stations_df['comp_NY'] = l_data_y
    self.stations_df['comp_Z'] = l_data_z

# +
import glob

recip_record_fqp = os.path.join(recip_project_fqp,'pyheader.project_record')
recip_record_h = _read_headers(recip_record_fqp)

ne = recip_record_h.nevents
ns = recip_record_h.nsrc
print(f'ne:{ne}, ns:{ns}')

# Trim the last 10 samples so reciprocal traces align with the forward traces
# (which are trimmed at the front below).
_load_data(recip_record_h,'b',scale=1.0,sl=slice(None,-10,None))
print(f'Recip Header:\n{recip_record_h}')
# -

# ## function for computing the derivatives and applying the bandpass to reciprocal traces

def calulate_spacial_derivative(tdf,eidx,sidx,tidx,g_p1,g_m1,sos,comp_key,coord_key):
    """Central-difference spatial derivative of a bandpassed trace pair.

    Uses the +h (gid g_p1) and -h (gid g_m1) ghost receivers around the center
    receiver (gid 0) and returns (trace(+h) - trace(-h)) / (2h).

    NOTE(review): name keeps the original spelling ('calulate_spacial') for
    compatibility with any external callers; 'calculate_spatial' intended.
    """
    gidx_0  = pd.IndexSlice[eidx,sidx,tidx,0]
    gidx_p1 = pd.IndexSlice[eidx,sidx,tidx,g_p1]
    gidx_m1 = pd.IndexSlice[eidx,sidx,tidx,g_m1]
    df_0  = tdf.loc[gidx_0]
    df_p1 = tdf.loc[gidx_p1]
    df_m1 = tdf.loc[gidx_m1]
    data_p1 = signal.sosfilt(sos, df_p1[comp_key])
    data_m1 = signal.sosfilt(sos, df_m1[comp_key])
    c_p1 = df_p1[coord_key]
    c_m1 = df_m1[coord_key]
    c_0  = df_0[coord_key]
    delta = 0.5*(c_p1 - c_m1)
    h = 2.0*np.abs(delta)
    c = c_m1 + delta
    assert h != 0                # ghost receivers must not coincide
    assert c_0-c == 0            # center receiver must sit midway between them
    h_scale = 1/h
    mt_trace = h_scale*(data_p1 - data_m1)
    return mt_trace

# ## Similar to cell directly above, but calculate full 9D Greens Functions

# +
delta = 50
comp_dict = {'comp_EX':0,'comp_NY':1,'comp_Z':2}
coord_dict = {0:'lon_xc',1:'lat_yc',2:'depth'}
sos = signal.butter(3, [1,10], 'bp', fs=1000, output='sos') #USE for fwd and Recip
#sos = signal.butter(4, 10, 'lp', fs=1000, output='sos') #USE for fwd and Recip
ne = recip_record_h.nevents
ng = 9 #num sations
nc = 3 #num components (x=0,y=1,z=0)
nf = 3 #num force-directions (ex=0,ny=1,zup=2)
nd = 3 #num direction/derivatives (d_ex=0,d_ny=1,d_zup=2)
nt = 4096-10 #num samples in trace/greens function (hard coded for testing only)
rgf_table = np.zeros((ne,ng,nc,nf,nd,nt),dtype=np.float64)
src_df = recip_record_h.solutions_df
l_trace_latlon = []
l_event_latlon = []
for eidx, edf in recip_record_h.stations_df.groupby(level='eid'):
    for sidx, sdf in edf.groupby(level='sid'):
        jdx = (eidx,sidx)
        if sidx == 0:
            l_trace_latlon.append([jdx,src_df.loc[jdx,"lon_xc"],src_df.loc[jdx,"lat_yc"]])
        for tidx, tdf in sdf.groupby(level='trid'):
            idx = (eidx,sidx,tidx,0)
            if sidx == 0 and eidx == 0:
                l_event_latlon.append([idx,tdf.loc[idx,"lon_xc"],tdf.loc[idx,"lat_yc"]])
            for comp_key in comp_dict.keys():
                # Reciprocity: the recip-project "event" index is the forward
                # receiver index and vice versa.
                ie = tidx
                ig = eidx
                ic = comp_dict[comp_key]
                fi = sidx
                # dx=0,dy=1,dz=2
                for di in range(3):
                    coord_key = coord_dict[di]
                    ip1 = di+1 #coord + h
                    im1 = ip1 + 3 #coord - h
                    if di == 2:
                        # depth axis is positive-down: swap +h/-h ghost indices
                        tm1 = ip1
                        ip1 = im1
                        im1 = tm1
                        #d = -delta #FIXME: This is FUGLY!
                    rgf_table[ie,ig,ic,fi,di,:] = calulate_spacial_derivative(tdf,eidx,sidx,tidx,ip1,im1,sos,comp_key,coord_key)

#assert False
print('l_event_latlon:')
for evn in l_event_latlon:
    print(evn)
print()
print('l_trace_latlon:')
for trc in l_trace_latlon:
    print(trc)
# -

# ## Plot and compare table

# ## Read Forward Record and load data

# +
fwd_record_fqp = os.path.join(fwd_project_fqp,'pyheader.project_record')
fwd_record_h = _read_headers(fwd_record_fqp)

ne = fwd_record_h.nevents
ns = fwd_record_h.nsrc
print(f'ne:{ne}, ns:{ns}')

# Trim the first 10 samples to align with the reciprocal traces trimmed above.
#_load_data(fwd_record_h,'b',scale=1E7,sl=slice(10,None,None))
_load_data(fwd_record_h,'b',scale=1.0,sl=slice(10,None,None))
print(f'Forward Record:\n{fwd_record_h}')
# -

# ## Inspec moment tensors from Forward event. Will use thise for making Reciprocal Traces

# +
def make_moment_tensor(src_h):
    """Build a MomentTensor (up-south-east basis) from a solution-header row."""
    mrr = src_h['mrr']
    mtt = src_h['mtt']
    mpp = src_h['mpp']
    mrt = src_h['mrt']
    mrp = src_h['mrp']
    mtp = src_h['mtp']
    h_matrix = np.array([[mrr,mrt,mrp],[mrt,mtt,mtp],[mrp,mtp,mpp]])
    return MomentTensor(m_up_south_east=h_matrix)

#print(f'Forward Record Sources:\n{fwd_record_h.solutions_df}')
SrcHeader = fwd_record_h.solution_cls
d_fwd_src = {}
for eidx, edf in fwd_record_h.solutions_df.groupby(level='eid'):
    for sidx, sdf in edf.groupby(level='sid'):
        idx = pd.IndexSlice[eidx,sidx]
        src = SrcHeader.from_series(fwd_record_h.solutions_df.loc[idx])
        #print(src)
        #mag = src.mw
        #strike = src.strike
        #dip = src.dip
        #rake = src.rake
        #mt = MomentTensor(mw=mag,strike=strike,dip=dip,rake=rake)
        mt = make_moment_tensor(src)
        print(mt)
        d_fwd_src[eidx] = mt
        #print(f'mt.aki_m6:\n{mt.aki_richards_m6()}')
        #print(f'header.m6:\n{src.mt}\n')

for key in d_fwd_src:
    print(d_fwd_src[key].m6_up_south_east())
# -

# ## Derive the x,y, and z components for each event from the sorted reciprocity traces

# +
ntr = 9
ne = 7
nc = 3
nt = 4096 - 10
cmb_traces = np.zeros((ne,ntr,nc,nt),dtype=np.float64)
for ie in range(ne):
    #print(f'mt:\n{d_fwd_src[ie]}')
    mw = d_fwd_src[ie].magnitude
    m0 = d_fwd_src[ie].moment
    #mt_arr = d_fwd_src[ie].m6_up_south_east()/m0
    mt_arr = d_fwd_src[ie].m6_up_south_east()
    # Map (r,t,p) = (up,south,east) components onto local (x=E, y=N, z=Up);
    # the south->north flip negates the mrt and mtp terms.
    wzz = mt_arr[0] #mrr
    wyy = mt_arr[1] #mtt
    wxx = mt_arr[2] #mpp
    wyz = -mt_arr[3] #mrt
    wxz = mt_arr[4] #mrp
    wxy = -mt_arr[5] #mtp
    #print(f'wuu:{wzz}, wnn:{wyy}, wee:{wxx}, wnu:{wyz}, weu:{wxz}, wen:{wxy}')
    print(f'Mw:{mw:.2f}, M0:{m0:.2f}, wzz:{wzz:.3f}, wyy:{wyy:.3f}, wee:{wxx:.3f}, wxy:{wxy:.3f}, wxz:{wxz:.3f}, wyz:{wyz:.3f}')
    for it in range(ntr):
        icomp = 0
        for comp_key in ['comp_EX','comp_NY','comp_Z']:
            # Weighted sum of Green's-function derivatives:
            # rgf_table[ie,it,ic, fi ,di,:]
            cmb_traces[ie,it,icomp,:] += wxx*1*rgf_table[ie,it, 0,icomp, 0,:] #Matrix: Mee
            cmb_traces[ie,it,icomp,:] += wyy*1*rgf_table[ie,it, 1,icomp, 1,:] #Matrix: Mnn
            cmb_traces[ie,it,icomp,:] += wzz*1*rgf_table[ie,it, 2,icomp, 2,:] #Matrix: Mzz
            #Matrix: M1/Mxy
            cmb_traces[ie,it,icomp,:] += wxy*1*rgf_table[ie,it, 1,icomp, 0,:]
            cmb_traces[ie,it,icomp,:] += wxy*1*rgf_table[ie,it, 0,icomp, 1,:]
            #Matrix: M2/Mxz
            cmb_traces[ie,it,icomp,:] += wxz*1*rgf_table[ie,it, 0,icomp, 2,:]
            cmb_traces[ie,it,icomp,:] += wxz*1*rgf_table[ie,it, 2,icomp, 0,:]
            #Matrix: M3/Myz
            cmb_traces[ie,it,icomp,:] += wyz*1*rgf_table[ie,it, 1,icomp, 2,:]
            cmb_traces[ie,it,icomp,:] += wyz*1*rgf_table[ie,it, 2,icomp, 1,:]
            icomp += 1
# -

# ## bandpass the "forward" traces to match the bandpass of the reciprocal traces

# +
ne = 7
ntr = 9
nc = 3
nt = 4096 - 10
fwd_traces = np.zeros((ne,ntr,nc,nt))
for eidx, edf in fwd_record_h.stations_df.groupby(level='eid'):
    for sidx, sdf in edf.groupby(level='sid'):
        for tidx, tdf in sdf.groupby(level='trid'):
            idx = pd.IndexSlice[eidx,sidx,tidx,0]
            ic = 0
            for comp_key in ['comp_EX','comp_NY','comp_Z']:
                fwd_traces[eidx,tidx,ic,:] = signal.sosfilt(sos, tdf.loc[idx,comp_key])
                ic += 1
# -

# ## Plot the "forward" traces (black) on top of the reciprocal constructed traces (fat-blue)

# +
# %matplotlib inline
# #%matplotlib notebook
rcomp_dict = {0:'E/X',1:'N/Y',2:'Z'}
ne = 7
ntr = 9
nc = 3
nplt = ne*ntr*nc
fig, axs = plt.subplots(nplt,1,figsize=(15,4*nplt))
fig.subplots_adjust(hspace=.75)
ip = 0
for ie in range(ne):
    for itr in range(ntr):
        for ic in range(nc):
            # Integrate (cumsum) the reciprocal derivative traces for comparison.
            int_cmb_traces = 0.0001*np.cumsum(cmb_traces[ie,itr,ic,:].copy())
            #recip_max = np.max(np.abs(int_cmb_traces))
            #comp_traces = int_cmb_traces/recip_max
            comp_traces = int_cmb_traces
            #axs[ip].plot(comp_traces,linewidth=2,linestyle='--',zorder=0,label='Recip')
            axs[ip].plot(comp_traces,color='gold',alpha=0.5,linestyle='-',linewidth=2,zorder=1,label='Recip')
            cmt_traces = fwd_traces[ie,itr,ic,:].copy()
            #cmt_traces /= np.max(np.abs(cmt_traces))
            #cmt_traces = np.cumsum(fwd_traces[ie,itr,ic,:].copy()) # if velocity
            #fwd_max = np.max(np.abs(cmt_traces))
            #cmt_traces /= fwd_max
            #print(f'fwd_max: {fwd_max}\nrecip_max: {recip_max}\nr/f: {recip_max/fwd_max}')
            '''
            div_traces = np.zeros_like(cmt_traces)
            for i in range(len(div_traces)):
                if cmt_traces[i] != 0:
                    div_traces[i] = comp_traces[i]/cmt_traces[i]
                else:
                    div_traces[i] = 1.0
            '''
            #axs[ip].plot(div_traces,color='orange',linewidth=2,zorder=0,label='CMT')
            # NOTE(review): the 2.3 amplitude factor is empirical — confirm its origin.
            axs[ip].plot(cmt_traces*2.3,color='lightblue',alpha=0.5,linewidth=5,zorder=0,label='CMT')
            axs[ip].set_title(f'Event:{ie}, Trace:{itr}, Comp:{rcomp_dict[ic]}')
            '''
            if itr == 3:
                print(f'Trace-3:\n{fwd_record_h[ie,0,itr,0]}')
            ''';
            ip += 1
#assert ip == nplt
plt.show()
assert False
# -

# Scratch cells exploring candidate scale factors.
np.sqrt(6)

np.sqrt(5)

np.e

2.3**2

4/np.sqrt(2)

# (removed a bare `np.sqrt()` call here — it takes no argument and raised TypeError)

x = np.array([[2,0,0],[0,4,0],[0,0,6]])
s = 1/np.sqrt(2)
y = s*np.sqrt(np.sum(x*x))
print(y)
print(x/y)

# +
class _XYZ(object):
    """Lightweight view of one slice of a Cdata triple (scratch experiment)."""
    def __init__(self,ex,ny,z):
        self.ex = ex
        self.ny = ny
        self.z = z
    '''
    @property
    def ex(self):
        return self.ex
    @property
    def ny(self):
        return self.ny
    @property
    def z(self):
        return self.z
    '''


class Cdata(object):
    """Three parallel arrays with slice-through indexing (scratch experiment)."""
    def __init__(self,ax,ay,az):
        self.ax = ax
        self.ay = ay
        self.az = az

    def __getitem__(self,islice):
        # Slicing a Cdata slices all three component arrays in lockstep.
        return _XYZ(self.ax[islice],self.ay[islice],self.az[islice])

    @property
    def ex(self):
        return self.ax

    @property
    def ny(self):
        return self.ay

    @property
    def z(self):
        return self.az


idata = Cdata(np.arange(10,20), np.arange(20,30), np.arange(30,40))
print(f'{ idata[::-1].ex == idata.ex[::-1] }')
print(f'{ idata[5::2].ny == idata.ny[5::2] }')
print(f'{ idata[:8:-3].z == idata.z[:8:-3] }')
x = idata[::-1].ex.copy()
y = idata[::-1].ex.copy()
y[0] = -1
if all( x == y ):
    print('yep')
    print( x == y )
else:
    print('nope')
# -
notebooks/BreadBoard_Consolodate_Reciprocity_MT_Construction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Batch Geocoding
#
# The `batch_geocode()` function in the `arcgis.geocoding` module geocodes an entire list of addresses. Geocoding many addresses at once is also known as bulk geocoding.
#
# <img src="/rest/geocode/api-reference/GUID-FD609701-B9B5-49DB-BFD9-A936280A09C6-web.png">
#
# This method can be used to find the following types of locations:
#
# * Street addresses:
#     * 27488 Stanford Ave, Bowden, North Dakota
#     * 380 New York St, Redlands, CA 92373
# * Administrative place names, such as city, county, state, province, or country names:
#     * Seattle, Washington
#     * State of Mahārāshtra
#     * Liechtenstein
# * Postal codes:
#     * 92591
#     * TW9 1DN
#
# Note:
# Points of interest (POI) can only be batch geocoded by using the category parameter to specify the place types to geocode.
#
# The addresses in your table can be stored in a single field or in multiple fields — one for each address component. Batch geocoding performance is better when the address parts are stored in separate fields.
#
# In this guide, we will observe:
# - [Maximum addresses](#maximum-addresses)
# - [Batch geocode access](#batch-geocode-access)
# - [`batch_geocode()` function signature and parameters](#batch-geocode-function-signature-and-parameters)
#  - [`addresses` parameter](#addresses-parameter)
#  - [`category` parameter](#category-parameter)
#  - [`source_country` parameter](#source-country-parameter)
#  - [`out_sr` parameter](#out-sr-parameter)
#  - [Batch geocoding output fields](#batch-geocoding-output-fields)
# - [Examples](#examples)
# - [Category filtering](#category-filtering)
#
# <div class="alert alert-info">
# **Tip:** You can also batch geocode address tables using the gis.content.import_data() and Item.publish() methods. These are higher-level APIs that simplify the batch geocoding process.
# </div>

# <a id="maximum-addresses"></a>
# ## Maximum addresses
#
# There is a limit to the maximum number of addresses that can be geocoded in a single batch request with the geocoder. The MaxBatchSize property defines this limit. For instance, if MaxBatchSize=2000, and 3000 addresses are sent as input, only the first 2000 will be geocoded. The SuggestedBatchSize property is also useful as it specifies the optimal number of addresses to include in a single batch request.
#
# Both of these properties can be determined by querying the geocoder:

# +
from arcgis.gis import GIS
from arcgis.geocoding import get_geocoders, batch_geocode

gis = GIS("http://www.arcgis.com", "username", "password")

# use the first of GIS's configured geocoders
geocoder = get_geocoders(gis)[0]
# -

print("MaxBatchSize : " + str(geocoder.properties.locatorProperties.MaxBatchSize))

print("SuggestedBatchSize : " + str(geocoder.properties.locatorProperties.SuggestedBatchSize))

# The client application must account for the limit by dividing the input address table into lists of MaxBatchSize or fewer addresses, and send each list to the service as a separate request. Note that the gis.content.import_data() and Item.publish() methods take care of this for you.
#
# For batch geocode operations, the geocoder returns a response when each address in the input recordset has been geocoded. If an unhandled error such as a timeout occurs during the process, the geocoder will not return the results for that call, even if most of the addresses in the input have already been geocoded. For this reason, the client application should implement logic to detect and handle such errors.

# <a id="batch-geocode-access"></a>
# ## Batch geocode access
#
# <p>
#
# <div class="alert alert-info">
#
# An ArcGIS Online organizational account is required to use the batch geocoding functionality provided by the World Geocoding Service. Successfully geocoded addresses, which return a status of Matched or Tied, cause ArcGIS Online service credits to be consumed for batch geocoding operations.
# </div>

# <a id="batch-geocode-function-signature-and-parameters"></a>
# ## batch_geocode() function signature and parameters
#
# The batch_geocode() function supports searching for lists of places and addresses. Each address in the list can be specified as a single line of text (single field format), or in multifield format with the address components separated into multiple parameters.
#
# The code snippet below imports the geocode function and displays its signature and parameters along with a brief description:

help(batch_geocode)

# <a id="addresses-parameter"></a>
# ### addresses parameter
#
# A list of addresses to be geocoded.
#
# For passing in the location name as a single line of text — single field batch geocoding — use a string.
#
# For passing in the location name as multiple lines of text — multifield batch geocoding — use the address fields described in the Geocoder documentation.
#
# The Geocoder provides localized versions of the input field names in all locales supported by it. See the topic Localized input field names in the Geocoder documentation for more information.
#
# ### Example: batch geocode using single line addresses

addresses = ["380 New York St, Redlands, CA",
             "1 World Way, Los Angeles, CA",
             "1200 Getty Center Drive, Los Angeles, CA",
             "5905 Wilshire Boulevard, Los Angeles, CA",
             "100 Universal City Plaza, Universal City, CA 91608",
             "4800 Oak Grove Dr, Pasadena, CA 91109"]

results = batch_geocode(addresses)

# NOTE(review): the variable `map` shadows the Python builtin of the same
# name; kept here to match the published guide output.
map = gis.map("Los Angeles", 9)
map

# ![sample output](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_tools_batchGeocoding_01.png)

for address in results:
    map.draw(address['location'])

# Each match has keys for score, location, attributes and address:
results[0].keys()

# <a id="category-parameter"></a>
# ### category parameter
#
# A place or address type which can be used to filter batch geocoding results. The parameter supports input of single category values or multiple comma-separated values. See the help topic <a href="/rest/geocode/api-reference/geocoding-category-filtering.htm">Category filtering</a> for complete details about the category parameter.
#
# Example of category filtering with a single category:
#
# <code>category="Address"</code>
#
# Example of category filtering with multiple categories:
#
# <code>category="Address,Postal"</code>
#

# <a id="source-country-parameter"></a>
# ### source_country parameter
#
# A value representing the country. When a value is passed for this parameter, all of the addresses in the input table are sent to the specified country locator to be geocoded. For example, if source_country="USA" is passed in a batch_geocode() call, it is assumed that all of the addresses are in the United States, and so all of the addresses are sent to the USA country locator. Using this parameter can increase batch geocoding performance when all addresses are within a single country.
#
# Acceptable values include the full country name, the ISO 3166-1 2-digit country code, or the ISO 3166-1 3-digit country code.
#
# A list of supported countries and codes is available <a href="/rest/geocode/api-reference/geocode-coverage.htm">here</a>.
#
# Example:
#
# <code>source_country="USA"</code>
#
# <a id="out-sr-parameter"></a>
# ### out_sr parameter
#
# The spatial reference of the x/y coordinates returned by the geocode method. This is useful for applications using a map with a spatial reference different than that of the geocoder.
#
# The spatial reference can be specified as either a well-known ID (WKID) or as a <a href="http://resources.arcgis.com/EN/HELP/REST/APIREF/GEOMETRY.HTML#SR">JSON spatial reference object</a>. If outSR is not specified, the spatial reference of the output locations is the same as that of the geocoder. The World Geocoding Service spatial reference is WGS84 (WKID = 4326).
#
# For a list of valid WKID values, see <a href="http://resources.arcgis.com/EN/HELP/REST/APIREF/PCS.HTML">Projected Coordinate Systems</a> and <a href="http://resources.arcgis.com/EN/HELP/REST/APIREF/GCS.HTML">Geographic Coordinate Systems</a>.
#
# Example (102100 is the WKID for the Web Mercator projection):
#
# <code>out_sr=102100</code>
#
# <a id="batch-geocoding-output-fields"></a>
# ### Batch geocoding output fields
#
# When you geocode a list of addresses, the output fields are returned as part of the attributes in the response. See the example JSON response below which shows all of the output fields that are returned for each record from a batch geocode process. The output fields are described <a href="/rest/geocode/api-reference/geocoding-service-output.htm#ESRI_SECTION1_42D7D3D0231241E9B656C01438209440">here</a>.
#
# <a id="examples"></a>
# ## Batch geocoding examples
#
# The earlier example showed how to call batch_geocode() with single line addresses. The following example illustrates how to call batch_geocode() with a list of multi-field addresses.
#
# ### Example: Batch geocode using multiple field addresses

addresses= [{
        "Address": "380 New York St.",
        "City": "Redlands",
        "Region": "CA",
        "Postal": "92373"
    },{
        "Address": "1 World Way",
        "City": "Los Angeles",
        "Region": "CA",
        "Postal": "90045"
    }]

results = batch_geocode(addresses)

map = gis.map("Los Angeles", 9)
map

# ![sample output](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_tools_batchGeocoding_02.png)

for address in results:
    map.draw(address['location'])

# <a id="category-filtering"></a>
# ## Category filtering
#
# The batch_geocode() method supports batch geocode filtering by category values, which represent address and place types. By including the category parameter in a batch_geocode() call you can avoid false positive matches to unexpected place and address types due to ambiguous input.
#
# For example, a user has a table of three-letter airport codes that they want to geocode. There may be city or business names that are the same as an airport code, causing false positive matches to other places. However the user can ensure that only airport matches are returned by specifying category=airport in the request.
#
# ### Example: Batch geocode airport codes with category

airports = batch_geocode(["LAX", "SFO", "ONT", "FAT", "LGB"], category="airport")

map = gis.map("CA", 6)
map

# ![sample output](http://esri.github.io/arcgis-python-api/notebooks/nbimages/guide_tools_batchGeocoding_03.png)

for airport in airports:
    popup = {
        "title" : airport['attributes']['PlaceName'],
        "content" : airport['address']
    }
    map.draw(airport['location'], popup)

# You can also use category filtering to avoid "low resolution" fallback matches. By default if the World Geocoding Service cannot find a match for an input address it will automatically search for a lower match level, such as a street name, city, or postal code. For batch geocoding a user may prefer that no match is returned in such cases so that they are not charged for the geocode. If a user passes category="Point Address,Street Address" in a batch_geocode() call, no fallback will occur if address matches cannot be found; the user will only be charged for the actual address matches.
#
# ### Example: Batch geocode with fallback allowed (no category)
#
# In the example below, the second address is not matched to a point address, but is matched to the city instead, due to fallback:

results = batch_geocode(["380 New York St Redlands CA 92373",
                         "27488 Stanford Dr Escondido CA"])
for result in results:
    print("Score " + str(result['score']) + " : " + result['address'])

# ### Example: Batch geocode with no fallback allowed (category="Point Address")

# In the example below, as a point address match is not found for the second address, there is no low resolution fallback as the category has been set to Point Address, and no match is returned for the second address:

results = batch_geocode(["380 New York St Redlands CA 92373",
                         "27488 Stanford Dr Escondido CA"],
                        category="Point Address")
for result in results:
    print("Score " + str(result['score']) + " : " + result['address'])
guide/08-finding-places-with-geocoding/batch-geocoding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + Credit: <NAME>, <NAME>
# This Python 3 environment comes with many helpful analytics libraries
# installed. It is defined by the kaggle/python Docker image:
# https://github.com/kaggle/docker-python

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# Input data files are available in the read-only "../input/" directory.
from PIL import Image
import os
import csv
from skimage import io

work_dir = "/kaggle/input/birds-22wi/birds"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

# You can write up to 20GB to the current directory (/kaggle/working/) that
# gets preserved as output when you create a version using "Save & Run All".
# You can also write temporary files to /kaggle/temp/, but they won't be
# saved outside of the current session.
# -

import torch

print(torch.__version__)
print(torch.cuda.is_available())


# +
class TestImageDataSet(Dataset):
    """Unlabelled test-set images: yields (image_tensor, file_name) pairs."""

    def __init__(self, label_csv_file, root_dir, fileNames, transform=None):
        """
        Args:
            label_csv_file (string): Path to the csv file with annotations
                (kept for interface symmetry with TrainImageDataSet; the
                test set is unlabeled so the CSV is not parsed here).
            root_dir (string): Directory with all the images.
            fileNames (list[str]): Image file names relative to root_dir.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.true_label_csv = label_csv_file
        self.true_label_map = {}
        self.fileNameList = fileNames
        self.root_dir = root_dir
        self.transform = transform
        # Intentionally NOT calling self.parseLabelsToDict(): test images
        # are unlabeled, so the label CSV is never read for this dataset.

    def parseLabelsToDict(self):
        """Load the path -> class map from the label CSV (classes kept as str)."""
        # The `with` block closes the file automatically; no explicit
        # close() call is needed (the original had a redundant one).
        with open(self.true_label_csv, newline='') as csvfile:
            for row in csv.DictReader(csvfile):
                self.true_label_map[row["path"]] = row["class"]

    def __len__(self):
        return len(self.fileNameList)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir, self.fileNameList[idx])
        # Force 3-channel RGB so grayscale/RGBA files don't break the model.
        image = Image.open(img_name).convert('RGB')
        if self.transform:
            image = self.transform(image)
        # Return the file name too, so predictions can be matched to files.
        return image, self.fileNameList[idx]


class TrainImageDataSet(Dataset):
    """Labelled training images: yields (image_tensor, class_index) pairs."""

    def __init__(self, label_csv_file, root_dir, fileNames, transform=None):
        """
        Args:
            label_csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            fileNames (list[str]): Image paths. Absolute paths also work:
                os.path.join returns the second argument unchanged when it
                is absolute.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.true_label_csv = label_csv_file
        self.true_label_map = {}
        self.fileNameList = fileNames
        self.root_dir = root_dir
        self.transform = transform
        self.parseLabelsToDict()

    def parseLabelsToDict(self):
        """Load the path -> class map from the label CSV (classes as int)."""
        with open(self.true_label_csv, newline='') as csvfile:
            for row in csv.DictReader(csvfile):
                self.true_label_map[row["path"]] = int(row["class"])

    def __len__(self):
        return len(self.fileNameList)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir, self.fileNameList[idx])
        image = Image.open(img_name).convert('RGB')
        # Label lookup is keyed on the bare file name.
        # NOTE(review): assumes the "path" column of labels.csv holds bare
        # file names; if it holds relative paths this raises KeyError --
        # verify against the CSV contents.
        imageName = self.fileNameList[idx].split("/")[-1]
        label = self.true_label_map[imageName]
        if self.transform:
            image = self.transform(image)
        return image, torch.tensor(label)


# +
def get_birds_data():
    """Build train/test DataLoaders for the Kaggle birds-22wi dataset.

    Returns:
        dict with keys 'train' and 'test' mapping to DataLoaders.
    """
    # Data augmentation transformations. Not for testing!
    transform_train = transforms.Compose([
        transforms.Resize(256),  # scale the short side to 256
        # Take 256x256 crops from edge-padded images for translation jitter.
        transforms.RandomCrop(256, padding=4, padding_mode='edge'),
        transforms.RandomHorizontalFlip(),  # 50% of time flip image along y-axis
        transforms.ToTensor(),
    ])

    # Deterministic resize only for evaluation.
    transform_test = transforms.Compose([
        transforms.Resize([256, 256]),
        transforms.ToTensor(),
    ])

    # Collect every training image path; one subfolder per class.
    trainFolderNames = os.listdir("/kaggle/input/birds-22wi/birds/train")
    trainFileNames = []
    for name in trainFolderNames:
        imageNames = os.listdir("/kaggle/input/birds-22wi/birds/train/" + name)
        trainFileNames += ["/kaggle/input/birds-22wi/birds/train/" + name + "/" + imageName
                           for imageName in imageNames]

    trainset = TrainImageDataSet(
        label_csv_file="/kaggle/input/birds-22wi/birds/labels.csv",
        root_dir='/kaggle/input/birds-22wi/birds/train',
        fileNames=trainFileNames,
        transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                              shuffle=True, num_workers=2)

    # All test images live in a single dummy class folder "0".
    testFileNames = os.listdir("/kaggle/input/birds-22wi/birds/test/0")
    testset = TestImageDataSet(
        label_csv_file="/kaggle/input/birds-22wi/birds/labels.csv",
        root_dir='/kaggle/input/birds-22wi/birds/test/0',
        fileNames=testFileNames,
        transform=transform_test)
    # testset = torchvision.datasets.ImageFolder(root='/kaggle/input/birds-22wi/birds/test/', transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=128,
                                             shuffle=False, num_workers=2,
                                             pin_memory=False)
    return {'train': trainloader, 'test': testloader}


data = get_birds_data()

# +
# Sanity-check one batch visually.
dataiter = iter(data['train'])
# FIX: DataLoader iterators no longer expose a .next() method (removed in
# modern PyTorch); use the builtin next() instead.
images, labels = next(dataiter)
images = images[:8]
print(len(images))
print(images.size())


def imshow(img):
    """Display a (C, H, W) tensor as an H x W x C image."""
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print("Labels:" + ' '.join('%9s' % labels[j] for j in range(8)))

flat = torch.flatten(images, 1)
print(images.size())
print(flat.size())
# -
#WITHOUT BIAS
class ConvBNNet(nn.Module):
    """Seven conv/BN/ReLU/maxpool stages + global average pooling head.

    Conv layers use bias=False because each is immediately followed by a
    BatchNorm layer whose learned shift makes the conv bias redundant.
    The final linear layer maps to the 555 bird classes.
    """

    def __init__(self):
        super(ConvBNNet, self).__init__()
        # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 128, 3, padding=1, bias=False)
        self.bn4 = nn.BatchNorm2d(128)
        self.conv5 = nn.Conv2d(128, 256, 3, padding=1, bias=False)
        self.bn5 = nn.BatchNorm2d(256)
        self.conv6 = nn.Conv2d(256, 512, 3, padding=1, bias=False)
        self.bn6 = nn.BatchNorm2d(512)
        self.conv7 = nn.Conv2d(512, 1024, 3, padding=1, bias=False)
        self.bn7 = nn.BatchNorm2d(1024)
        self.fc1 = nn.Linear(1024, 555)

    def forward(self, x):
        # Input 256x256x3
        x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), kernel_size=2, stride=2)  # 128x128x16
        x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), kernel_size=2, stride=2)  # 64x64x32
        x = F.max_pool2d(F.relu(self.bn3(self.conv3(x))), kernel_size=2, stride=2)  # 32x32x64
        x = F.max_pool2d(F.relu(self.bn4(self.conv4(x))), kernel_size=2, stride=2)  # 16x16x128
        x = F.max_pool2d(F.relu(self.bn5(self.conv5(x))), kernel_size=2, stride=2)  # 8x8x256
        x = F.max_pool2d(F.relu(self.bn6(self.conv6(x))), kernel_size=2, stride=2)  # 4x4x512
        x = F.max_pool2d(F.relu(self.bn7(self.conv7(x))), kernel_size=2, stride=2)  # 2x2x1024
        # Global average pooling across each channel (input could be 2x2x1024,
        # 4x4x1024, 7x3x1024 -- output is always a 1024-length vector).
        x = F.adaptive_avg_pool2d(x, 1)  # 1x1x1024
        x = torch.flatten(x, 1)  # vector 1024
        x = self.fc1(x)
        return x


# +
def train(net, dataloader, epochs=1, start_epoch=0, lr=0.01, momentum=0.9,
          decay=0.0005, verbose=1, print_every=10, state=None, schedule=None,
          checkpoint_path=None):
    """Train net with SGD + cross-entropy; return the list of per-batch losses.

    Args:
        net: the model to train (moved to `device`).
        dataloader: yields (inputs, labels) batches.
        epochs / start_epoch: run epochs in range(start_epoch, epochs).
        lr, momentum, decay: SGD hyperparameters.
        verbose, print_every: progress-printing controls.
        state, checkpoint_path: accepted for interface compatibility;
            currently unused.
        schedule: optional {epoch: lr} dict applied when an epoch starts.
            FIX: default was a mutable `{}`; use the None-sentinel idiom.
    """
    if schedule is None:
        schedule = {}
    net.to(device)
    net.train()
    losses = []
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum,
                          weight_decay=decay)

    for epoch in range(start_epoch, epochs):
        sum_loss = 0.0

        # Update learning rate when scheduled
        if epoch in schedule:
            print("Learning rate: %f" % schedule[epoch])
            for g in optimizer.param_groups:
                g['lr'] = schedule[epoch]

        for i, batch in enumerate(dataloader, 0):
            inputs, labels = batch[0].to(device), batch[1].to(device)

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()   # autograd magic, computes all the partial derivatives
            optimizer.step()  # takes a step in gradient direction

            losses.append(loss.item())
            sum_loss += loss.item()
            if i % print_every == print_every - 1:  # print every `print_every` mini-batches
                if verbose:
                    print('[%d, %5d] loss: %.3f' % (epoch, i + 1, sum_loss / print_every))
                sum_loss = 0.0
    return losses


def evaluate(net, dataloader):
    """Predict classes for every image; return a DataFrame (path, class)."""
    net.to(device)
    net.eval()
    rows = []
    with torch.no_grad():
        for batch_num, batch in enumerate(dataloader, start=1):
            print("batch:", batch_num)
            images = batch[0].to(device)
            names = batch[1]
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            predicted = predicted.cpu()
            # FIX: DataFrame.append was removed in pandas 2.0 and rebuilding
            # the frame per row was O(n^2); accumulate plain dicts instead
            # and construct the DataFrame once.
            for name, pred in zip(names, predicted):
                rows.append({"path": "test/" + name, "class": pred.item()})
    return pd.DataFrame(rows, columns=['path', "class"])


def smooth(x, size):
    """Moving average of x with the given window size."""
    return np.convolve(x, np.ones(size) / size, mode='valid')


# +
final_net = ConvBNNet()

#Test Training
#final_losses = train(final_net, data['train'], epochs=1, lr=.1 , decay=.0005)

#Real Training: step the learning rate down across four phases.
final_losses = train(final_net, data['train'], epochs=35, lr=.1, decay=.0005)
final_losses += train(final_net, data['train'], epochs=20, lr=.01, decay=.0005)
final_losses += train(final_net, data['train'], epochs=15, lr=.001, decay=.0005)
final_losses += train(final_net, data['train'], epochs=3, lr=.0001, decay=.0005)
# -

plt.plot(smooth(final_losses, 50))

# print("Testing accuracy: %f" % accuracy(net, data['test']))
result = evaluate(final_net, data["test"])
print(result)

# total = 0
# correct = 0
# with torch.no_grad():
#     for i, batch in enumerate(data["train"], 0):
#         images, labels = batch[0].to(device), batch[1].to(device)
#         outputs = net(images)
#         _, predicted = torch.max(outputs.data, 1)
#         total += labels.size(0)
#         correct += (predicted == labels).sum().item()
#         print(i)

result.to_csv("output.csv", index=False)

len(result[result["class"] != 246])
birds-new-with-256x256-more-epoches.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="IZtnwJbt6oJT" # <img src="https://ithaka-labs.s3.amazonaws.com/static-files/images/tdm/tdmdocs/CC_BY.png" align=left alt="CC BY license logo" /><br /><br /> # Created by [<NAME>](http://nkelber.com) and Ted Lawless for [JSTOR Labs](https://labs.jstor.org/) under [Creative Commons CC BY License](https://creativecommons.org/licenses/by/4.0/)<br /> # **For questions/comments/improvements, email <EMAIL>.**<br /> # ____ # - # # Exploring Metadata and Pre-Processing # # **Description of methods in this notebook:** # This [notebook](https://docs.constellate.org/key-terms/#jupyter-notebook) shows how to explore and pre-process the [metadata](https://docs.constellate.org/key-terms/#metadata) of a [dataset](https://docs.constellate.org/key-terms/#dataset) using [Pandas](https://docs.constellate.org/key-terms/#pandas). 
# # The following processes are described: # # * Importing a [CSV file](https://docs.constellate.org/key-terms/#csv-file) containing the [metadata](https://docs.constellate.org/key-terms/#metadata) for a given dataset ID # * Creating a [Pandas](https://docs.constellate.org/key-terms/#pandas) dataframe to view the [metadata](https://docs.constellate.org/key-terms/#metadata) # * Pre-processing your [dataset](https://docs.constellate.org/key-terms/#dataset) by filtering out unwanted texts # * Exporting a list of relevant IDs to a [CSV file](https://docs.constellate.org/key-terms/#csv-file) # * Visualizing the metadata of your pre-processed [dataset](https://docs.constellate.org/key-terms/#dataset) by the number of documents/year and pages/year # # **Use Case:** For Learners (Detailed explanation, not ideal for researchers) # # [Take me to the **Research Version** of this notebook ->](./exploring-metadata-for-research.ipynb) # # **Difficulty:** Intermediate # # **Completion time:** 45 minutes # # **Knowledge Required:** # * Python Basics Series ([Start Python Basics I](./python-basics-1.ipynb)) # # **Knowledge Recommended:** # # * [Pandas I](./pandas-1.ipynb) # # **Data Format:** [CSV file](https://docs.constellate.org/key-terms/#csv-file) # # **Libraries Used:** # * [tdm_client](https://docs.constellate.org/key-terms/#tdm-client) to retrieve the [metadata](https://docs.constellate.org/key-terms/#metadata) in a [CSV file](https://docs.constellate.org/key-terms/#csv-file) # * [Pandas](https://docs.constellate.org/key-terms/#pandas) to manipulate and visualize the metadata # # **Research Pipeline:** None # ____ # ## Import your dataset # # We'll use the `tdm_client` library to automatically retrieve the [metadata](https://docs.constellate.org/key-terms/#metadata) for a [dataset](https://docs.constellate.org/key-terms/#dataset). 
# We can retrieve [metadata](https://docs.constellate.org/key-terms/#metadata)
# in a [CSV file](https://docs.constellate.org/key-terms/#csv-file) using the
# `get_metadata` method.
#
# Enter a [dataset ID](https://docs.constellate.org/key-terms/#dataset-ID) in
# the next code cell.
#
# If you don't have a dataset ID, you can:
# * Use the sample dataset ID already in the code cell
# * [Create a new dataset](https://constellate.org/builder)
# * [Use a dataset ID from other pre-built sample datasets](https://constellate.org/dataset/dashboard)

# Create a variable `dataset_id` to hold our dataset ID.
# The default dataset is Shakespeare Quarterly, 1950-present.
dataset_id = "7e41317e-740f-e86a-4729-20dab492e925"

# Next, import `tdm_client` and call its `get_metadata` method, passing our
# `dataset_id` as the argument.

# + tags=[]
# Import the `tdm_client`
import tdm_client

# The .get_metadata() method downloads the CSV file for our metadata into the
# /data folder and returns the file's name and location as a string, which we
# store in `dataset_metadata`.
dataset_metadata = tdm_client.get_metadata(dataset_id)
# -

# We are ready to import pandas for our analysis and create a dataframe. We
# will use the `read_csv()` method to create our dataframe from the CSV file.

# +
# Import pandas
import pandas as pd

# Create our dataframe
df = pd.read_csv(dataset_metadata)
# -

# We can confirm the size of our dataset using the `len()` function on our
# dataframe.

# + tags=[]
original_document_count = len(df)
print(f'Total original documents: {original_document_count}')
# -

# Now let's take a look at the data in our dataframe `df`. We will set pandas
# to show all columns using `set_option()` then get a preview using `head()`.
# +
# Show every column in previews.
# FIX: use the fully-qualified option names ("display.max_columns",
# "display.max_rows") -- the bare "max_columns"/"max_rows" forms rely on
# pandas' partial-key matching, which is fragile across pandas versions.
# Setting None gives us all columns;
# to show fewer columns replace None with an integer.
pd.set_option("display.max_columns", None)

# Set maximum number of rows to 50
pd.set_option("display.max_rows", 50)

# Show the first five rows of our dataframe.
# To show a different number of preview rows,
# pass an integer into the .head()
df.head()
# -

# Here are descriptions for the metadata types found in each column:
#
# |Column Name|Description|
# |---|---|
# |id|a unique item ID (In JSTOR, this is a stable URL)|
# |title|the title for the item|
# |isPartOf|the larger work that holds this title (for example, a journal title)|
# |publicationYear|the year of publication|
# |doi|the digital object identifier for an item|
# |docType|the type of document (for example, article or book)|
# |provider|the source or provider of the dataset|
# |datePublished|the publication date in yyyy-mm-dd format|
# |issueNumber|the issue number for a journal publication|
# |volumeNumber|the volume number for a journal publication|
# |url|a URL for the item and/or the item's metadata|
# |creator|the author or authors of the item|
# |publisher|the publisher for the item|
# |language|the language or languages of the item (eng is the ISO 639 code for English)|
# |pageStart|the first page number of the print version|
# |pageEnd|the last page number of the print version|
# |placeOfPublication|the city of the publisher|
# |wordCount|the number of words in the item|
# |pageCount|the number of print pages in the item|
# |outputFormat|what data is available ([unigrams](https://docs.constellate.org/key-terms/#unigram), [bigrams](https://docs.constellate.org/key-terms/#bigram), [trigrams](https://docs.constellate.org/key-terms/#trigram), and/or full-text)|
#
# ___
# ## Filtering out columns using Pandas
#
# If there are any columns you would like to drop from your analysis, you can
# drop them with:
#
# `df = df.drop(['column_name1', 'column_name2', ...], axis=1)`

# +
# Drop each of these named columns.
# axis=1 specifies we are dropping columns;
# axis=0 would specify to drop rows.
df = df.drop(['outputFormat', 'pageEnd', 'pageStart', 'datePublished'], axis=1)

# Show the first five rows of our updated dataframe
df.head()
# -

# ## Filtering out rows with Pandas
#
# Now that we have filtered out unwanted metadata columns, we can begin
# filtering out any texts that may not match our research interests. Let's
# examine the first and last ten rows of the dataframe to see if we can
# identify texts that we would like to remove. We are looking for patterns in
# the metadata that could help us remove many texts at once.

# Preview the first ten items in the dataframe.
# Can you identify patterns to select rows to remove?
df.head(10)

# Preview the last ten items in the dataframe.
# Can you identify patterns to select rows to remove?
df.tail(10)


# +
# We create a function to report how many documents were removed.
def texts_report():
    """Prints out a report of:
    1. How many documents were removed
    2. The total original number of documents
    3. The total current number of documents

    NOTE(review): relies on the module-level globals `before_count`,
    `original_document_count`, and `df`. Each filtering cell must capture
    `before_count = len(df)` before mutating `df` for the report to be
    accurate.
    """
    total_dropped = before_count - len(df)
    print(f'{total_dropped} texts were removed.')
    print(f'Total original documents: {original_document_count}')
    print('Total current documents: ', len(df))
# -

# ### Remove all rows without data for a particular column
#
# For example, we may wish to remove any texts that do not have authors. (In
# the case of journals, this may be helpful for removing paratextual sections
# such as the table of contents, indices, etc.) The column of interest in
# this case is `creator`.
# +
# Drop every text that has no author. (For journals this removes paratextual
# sections such as tables of contents and indices, which lack a 'creator'.)
print('Removing texts without authors...', end='')
before_count = len(df)
df = df.dropna(subset=['creator'])  # keep only rows with a value in 'creator'

# Report the number of texts removed
texts_report()
# -

# ### Remove row based on the content of a particular column
#
# We can also remove texts, depending on whether we do (or do not) want a
# particular value in a column. Here are a few examples.

# +
# Remove all items whose title matches an undesired value.
# Change the column and `title_to_remove` to suit your research.
title_to_remove = 'Review Article'

# Removing texts
print(f'Removing texts with title "{title_to_remove}"...', end='')
before_count = len(df)
keep_mask = df["title"] != title_to_remove
df = df.loc[keep_mask]

# Report the number of texts removed
texts_report()

# +
# Keep only items written in a particular language.
# Change 'eng' to another language code for other languages.
language = 'eng'

# Removing texts
print(f'Removing texts not in "{language}" language...')
before_count = len(df)
df = df.loc[df["language"] == language]

# Report the number of texts removed
texts_report()

# +
# Remove all items shorter than a minimum word count.
# Change `min_word_count` to your desired threshold.
min_word_count = 1500

# Removing texts
print(f'Removing texts with fewer than {min_word_count} words...')
before_count = len(df)
df = df.loc[df["wordCount"] > min_word_count]

# Report the number of texts removed
texts_report()
# -

# Take a final look at your dataframe to make sure the current texts fit your
# research goals. In the next step, we will save the IDs of your
# pre-processed dataset.

# Preview the first 50 rows of your dataset
# If all the items look good, move to the next step.
df.head(50)

# ## Saving a list of IDs to a CSV file

# Write the column "id" to a CSV file called `pre-processed_###.csv`
# where ### is the `dataset_id`.
# NOTE(review): this intentionally keeps pandas' default index column in the
# output file, matching the original workflow's file format.
df["id"].to_csv('data/pre-processed_' + dataset_id + '.csv')

# Download the "pre-processed_###.csv" file (where ### is the `dataset_id`)
# for future analysis. You can use this file in combination with the dataset
# ID to automatically filter your texts and reduce the processing time of
# your analyses.

# ---
# ## Visualizing the Pre-Processed Data

# For displaying plots
# %matplotlib inline

# + colab={} colab_type="code" id="upxLwnZr6oJs"
# Group the data by publication year and plot the number of ids per year.
df.groupby(['publicationYear'])['id'].agg('count').plot.bar(
    title='Documents by year', figsize=(20, 5), fontsize=12);

# Read more about Pandas dataframe plotting here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html

# + [markdown] colab_type="text" id="fNp2wN8I6oJt"
# And now let's look at the total page numbers by year.

# + colab={} colab_type="code" id="uifT8Ocy6oJu"
# Group the data by publication year and plot the summed page counts.
# FIX: the chart groups by year, not decade, so the title now says so.
df.groupby(['publicationYear'])['pageCount'].agg('sum').plot.bar(
    title='Pages by year', figsize=(20, 5), fontsize=12);
exploring-metadata.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import mlxtend
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
import pandas_ml
from pandas_ml import ConfusionMatrix
import os
from sklearn.utils import shuffle
import glob
import sys
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from vis.utils import utils
from vis.visualization import visualize_activation
from keras import activations

################### DATASET HANDLING ####################

DATASET_PATH = "/home/dic/jupyter/train_Binary"  # change the path to your dataset folder here


def parseDataset():
    """Scan DATASET_PATH and return (classes, train, val, test) path lists.

    Subfolder names are used as class labels. The shuffled image list is cut
    into a 70% / ~15% / ~15% train / validation / test split.
    (FIX: the original comment claimed a 15% *validation* split while the
    code actually reserves 70% for training -- the comments now describe what
    the code does.)
    """
    # we use subfolders as class labels
    classes = [folder for folder in sorted(os.listdir(DATASET_PATH))]

    # now we enlist all image paths
    images = []
    for c in classes:
        images += [os.path.join(DATASET_PATH, c, path)
                   for path in os.listdir(os.path.join(DATASET_PATH, c))]
    # print(images)

    # shuffle image paths (fixed seed for a reproducible split)
    images = shuffle(images, random_state=74)

    total_len = len(images)
    print("total:", total_len)
    vsplit = int(len(images) * 0.70)   # number of training images (70%)
    print("vsplit:", vsplit)
    tsplit = int(total_len - vsplit)   # images left over for val + test
    print("tsplit:", tsplit)
    gsplit = int(tsplit / 2)           # half of the remainder each
    print(gsplit)
    msplit = int(vsplit + gsplit)      # end index of the validation slice

    train = images[:vsplit]      # first 70% of the shuffled paths
    val = images[vsplit:msplit]  # next ~15%
    test = images[msplit:]       # final ~15%

    # show some stats
    print("CLASS LABELS:", classes)
    print("TRAINING IMAGES:", len(train))
    print("VALIDATION IMAGES:", len(val))
    print("TEST IMAGES:", len(test))

    return classes, train, val, test


# parse dataset
CLASSES, TRAIN, VAL, TEST = parseDataset()
import os
import time
from sklearn.utils import shuffle
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools

import lasagne
from lasagne import random as lasagne_random
from lasagne import layers
from lasagne.nonlinearities import softmax, tanh
from lasagne import objectives
from lasagne import updates

import theano
import theano.tensor as T

import warnings
warnings.filterwarnings("ignore")


################## BUILDING THE MODEL ###################
def buildModel():
    """Build the binary-classification ConvNet and return its output layer.

    Architecture: (None, 3, 22, 23) input -> 3x3 conv (32 filters, tanh)
    -> dropout(0.1) -> dense(128) -> dropout(0.25) -> dense(128)
    -> softmax over 2 classes.
    """
    # input layer with the inputs (None, dimensions, width, height)
    l_input = layers.InputLayer((None, 3, 22, 23))

    # single convolutional layer. (FIX(comment): the original comment said it
    # was "followed by a pooling layer", but the pooling layer -- like the
    # deeper conv stack below -- is deliberately commented out.)
    l_conv1 = layers.Conv2DLayer(l_input, num_filters=32, filter_size=3,
                                 nonlinearity=lasagne.nonlinearities.tanh)
    # l_pool = layers.MaxPool2DLayer(l_conv1, pool_size=2)
    l_drop1 = layers.DropoutLayer(l_conv1, p=0.1)
    # l_dense1 = layers.DenseLayer(l_drop1, num_units=128)
    # l_conv2 = layers.Conv2DLayer(l_drop1, num_filters=128, filter_size=5, nonlinearity=lasagne.nonlinearities.rectify)
    # l_conv3 = layers.Conv2DLayer(l_conv2, num_filters=256, filter_size=5, nonlinearity=lasagne.nonlinearities.rectify)

    # two dense layers with dropout in between.
    # FIX: the second dense layer used to rebind the name l_dense2, shadowing
    # the first; distinct local names keep the stack readable (the layer
    # graph, and therefore behavior, is unchanged).
    l_dense2 = layers.DenseLayer(l_drop1, num_units=128)
    l_drop2 = layers.DropoutLayer(l_dense2, p=0.25)
    l_dense3 = layers.DenseLayer(l_drop2, num_units=128)

    # softmax output over the 2 classes
    l_output = layers.DenseLayer(l_dense3, num_units=2,
                                 nonlinearity=lasagne.nonlinearities.softmax)

    # let's see how many params our net has
    print ("MODEL HAS", layers.count_params(l_output), "PARAMS")

    # we return the layer stack as our network by returning the last layer
    return l_output


NET = buildModel()


#################### LOSS FUNCTION ######################
def calc_loss(prediction, targets):
    """Mean categorical cross-entropy between predictions and one-hot targets."""
    # categorical crossentropy is the best choice for a multi-class softmax output
    l = T.mean(objectives.categorical_crossentropy(prediction, targets))
    return l


# theano variable for the class targets:
# this is the output vector the net should predict
targets = T.matrix('targets', dtype=theano.config.floatX)

# get the network output
prediction = layers.get_output(NET)

# calculate the loss
loss = calc_loss(prediction, targets)


################# ACCURACY FUNCTION #####################
def calc_accuracy(prediction, targets):
    """Top-1 categorical accuracy of predictions against one-hot targets."""
    # we can use the lasagne objective categorical_accuracy to determine the top1 accuracy
    a = T.mean(objectives.categorical_accuracy(prediction, targets, top_k=1))
    return a


accuracy = calc_accuracy(prediction, targets)


####################### UPDATES #########################
# get all trainable parameters (weights) of our net
params = layers.get_all_params(NET, trainable=True)

# we use the adam update:
# it changes params based on our loss function with the learning rate
param_updates = updates.adam(loss, params, learning_rate=0.0002)

#################### TRAIN FUNCTION ######################
# the theano train function takes images and class targets as input;
# it updates the parameters of the net and returns the current loss as float value.
# compiling theano functions may take a while, you might want to get a coffee now...
print ("COMPILING THEANO TRAIN FUNCTION...",) train_net = theano.function([layers.get_all_layers(NET)[0].input_var, targets], loss, updates=param_updates) print ("DONE!") ################# PREDICTION FUNCTION #################### #we need the prediction function to calculate the validation accuracy #this way we can test the net after training #first we need to get the net output net_output = layers.get_output(NET) #now we compile another theano function; this may take a while, too print ("COMPILING THEANO TEST FUNCTION...",) test_net = theano.function([layers.get_all_layers(NET)[0].input_var, targets], [net_output, loss, accuracy]) print ("DONE!") import cv2 import numpy as np #################### BATCH HANDLING ##################### def loadImageAndTarget(path): #print(path) #here we open the image and scale it to 64x64 pixels img = cv2.imread(path) #print(path) img = cv2.resize(img, (22, 23)) #OpenCV uses BGR instead of RGB, but for now we can ignore that #our image has the shape (64, 64, 3) but we need it to be (3, 64, 64) img = np.transpose(img, (2, 1, 0)) #we want to use subfolders as class labels label = path.split(os.sep[-1])[-2] #print(label) #we need to get the index of our label from CLASSES index = CLASSES.index(label) #allocate array for target target = np.zeros((2), dtype='float32') #we set our target array = 1.0 at our label index, all other entries remain zero #Example: if label = dog and dog has index 2 in CLASSES, target looks like: [0.0, 0.0, 1.0, 0.0, 0.0] target[index] = 1.0 #we need a 4D-vector for our image and a 2D-vector for our targets #we can adjust array dimension with reshape img = img.reshape(-1, 3, 22, 23) target = target.reshape(-1, 2) return img, target #a reasonable size for one batch is 128 BATCH_SIZE = 200 def getDatasetChunk(split): #get batch-sized chunks of image paths for i in range(0, len(split), BATCH_SIZE): yield split[i:i+BATCH_SIZE] def getNextImageBatch(split=TRAIN): #allocate numpy arrays for image data and targets 
#input shape of our ConvNet is (None, 3, 22, 23) x_b = np.zeros((BATCH_SIZE, 3, 22, 23), dtype='float32') #output shape of our ConvNet is (None, 5) as we have 5 classes y_b = np.zeros((BATCH_SIZE, 2), dtype='float32') #fill batch for chunk in getDatasetChunk(split): ib = 0 for path in chunk: #load image data and class label from path x, y = loadImageAndTarget(path) #pack into batch array x_b[ib] = x y_b[ib] = y ib += 1 #instead of return, we use yield yield x_b[:len(chunk)], y_b[:len(chunk)] def getNextImageBatch1(split=TEST): #allocate numpy arrays for image data and targets #input shape of our ConvNet is (None, 3, 22, 23) x_T = np.zeros((BATCH_SIZE, 3, 22, 23), dtype='float32') #output shape of our ConvNet is (None, 5) as we have 5 classes y_T = np.zeros((BATCH_SIZE, 2), dtype='float32') #fill batch for chunk in getDatasetChunk(split): iT = 0 for path in chunk: #load image data and class label from path x1, y1 = loadImageAndTarget(path) #pack into batch array x_T[iT] = x1 y_T[iT] = y1 iT += 1 #instead of return, we use yield yield x_T[:len(chunk)], y_T[:len(chunk)] cmatrix = [] def clearConfusionMatrix(): global cmatrix #allocate empty matrix of size 5x5 (for our 5 classes) cmatrix = np.zeros((len(CLASSES), len(CLASSES)), dtype='int32') def updateConfusionMatrix(t, p): global cmatrix cmatrix += confusion_matrix(np.argmax(t, axis=1), np.argmax(p, axis=1)) def showConfusionMatrix(): #new figure plt.figure(1) plt.clf() #show matrix plt.imshow(cmatrix, interpolation='nearest', cmap=plt.cm.Blues) plt.title('Confusion Matrix') plt.colorbar() #tick marks tick_marks = np.arange(len(CLASSES)) plt.xticks(tick_marks, CLASSES) plt.yticks(tick_marks, CLASSES) #labels thresh = cmatrix.max() / 2. 
for i, j in itertools.product(range(cmatrix.shape[0]), range(cmatrix.shape[1])): plt.text(j, i, cmatrix[i, j], horizontalalignment="center", color="white" if cmatrix[i, j] > thresh else "black") #axes labels plt.ylabel('Target label') plt.xlabel('Predicted label') #show plt.show() plt.pause(0.5) import matplotlib.pyplot as plt ##################### STAT PLOT ######################### plt.ion() def showChart(epoch, t, v, a): #new figure plt.figure(0) plt.clf() #x-Axis = epoch e = range(0, epoch) #loss subplot plt.subplot(211) plt.plot(e, train_loss, 'r-', label='Train Loss') plt.plot(e, val_loss, 'b-', label='Val Loss') plt.ylabel('loss') #show labels plt.legend(loc='upper right', shadow=True) #accuracy subplot plt.subplot(212) plt.plot(e, val_accuracy, 'g-') plt.ylabel('accuracy') plt.xlabel('epoch') #show plt.show() plt.pause(0.5) import time import sklearn import numpy import matplotlib.pyplot as plt import numpy as np ###################### TRAINING ######################### print ("START TRAINING...") train_loss = [] val_loss = [] val_accuracy = [] for epoch in range(1,10): #start timer start = time.time() #reset confusion matrix clearConfusionMatrix() #iterate over train split batches and calculate mean loss for epoch t_l = [] for image_batch, target_batch in getNextImageBatch(): #calling the training functions returns the current loss l = train_net(image_batch, target_batch) t_l.append(l) #we validate our net every epoch and pass our validation split through as well v_l = [] v_a = [] for image_batch, target_batch in getNextImageBatch(VAL): #calling the test function returns the net output, loss and accuracy prediction_batch, l, a = test_net(image_batch, target_batch) v_l.append(l) v_a.append(a) #save predicions and targets for confusion matrix updateConfusionMatrix(target_batch,prediction_batch) #mlxtend confusion matrix #cm=confusion_matrix(target_batch,prediction_batch) # fig,ax=plot_confusion_matrix(conf_mat=cm) # plt.show() #pandas_ml confusion matrix 
#confusion_matrix1 = ConfusionMatrix(target_batch,prediction_batch) #print("Confusion matrix:\n%s" % confusion_matrix1) #stop timer end = time.time() #calculate stats for epoch train_loss.append(np.mean(t_l)) val_loss.append(np.mean(v_l)) val_accuracy.append(np.mean(v_a)) #print stats for epoch print ("EPOCH:", epoch,) print ("TRAIN LOSS:", train_loss[-1],) print ("VAL LOSS:", val_loss[-1],) print ("VAL ACCURACY:", (int(val_accuracy[-1] * 1000) / 10.0), "%",) print ("TIME:", (int((end - start) * 10) / 10.0), "s") #show chart showChart(epoch, train_loss, val_loss, val_accuracy) #show confusion matrix showConfusionMatrix() #show best accuracy and epoch print ("BEST VAL ACCURACY:", (int(max(val_accuracy) * 1000) / 10.0), "%", "EPOCH:", val_accuracy.index(max(val_accuracy)) + 1) #####TESTING DATA####### test_err = 0 test_acc = 0 test_batches = 0 for image_batch, target_batch in getNextImageBatch1(): test_batch, err, acc = test_net(image_batch, target_batch) test_err += err test_acc += acc test_batches += 1 print("Final results:") print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches)) print(" test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100)) print ("TRAINING DONE!") # - # + from vis.visualization import visualize_activation from vis.utils import utils from keras import activations from matplotlib import pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize'] = (18, 6) # Utility to search for layer index by name. # Alternatively we can specify this as -1 since it corresponds to the last layer. layer_idx = utils.find_layer_idx(NET, 'l_output') # Swap softmax with linear NET.layers[layer_idx].activation = activations.linear NET = utils.apply_modifications(NET) # This is the output node we want to maximize. 
filter_idx = 0
img = visualize_activation(NET, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])
# -

from sklearn.utils.class_weight import compute_sample_weight

# Per-sample weights for the seven variable-star classes.
# FIX: scikit-learn only accepts the preset 'balanced' (or a dict / list of
# dicts / None) for class_weight; the previous value 'unbalanced' raises
# ValueError("The only valid preset for class_weight is 'balanced' ...").
y = ['ACEP', 'CEP', 'DSCT', 'ECL', 'LPV', 'RRLYR', 'T2CEP']
compute_sample_weight(class_weight='balanced', y=y)


# +
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix `cm` for the given class labels.

    Row-wise (per true class) normalization can be applied by setting
    `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Pick the text colour per cell so the counts stay readable on the colormap.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


# NOTE(review): y_true and y_pred are assumed to be defined in an earlier
# cell -- they are not created anywhere in this notebook chunk; confirm.
cm = confusion_matrix(y_true, y_pred)
np.set_printoptions(precision=2)

plt.figure()
plot_confusion_matrix(cm, classes=[0, 1],
                      title='Confusion matrix')
# -

# +

# -
all_codes/cnn/cnn_final_try1-ORIGINAL-TEST_ACCURACY_CONFMAT_shallow_network_CRTS_cep_ecl-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # 保存模型 import tensorflow as tf import numpy as np m = tf.Variable([[1., 2.],[3., 4.]], name = 'mydata_m') b = tf.Variable([[5., 6.],[7., 7.]], name = 'mydata_b') #np = tf.constant(np.arange(1, 13, dtype=np.int32),shape=[3,4]) init=tf.global_variables_initializer() #saver = tf.train.Saver(savable_variables, max_to_keep=n, keep_checkpoint_every_n_hours=m) saver = tf.train.Saver() sess = tf.Session() sess.run(init) #print(sess.run(m)) print(m.name) print(b.name) #saver.save(sess, 'model_name', global_step=step,write_meta_graph=False) saver.save(sess,"c:/liujh168.ckpt") sess.close() # + import tensorflow as tf import numpy as np saver = tf.train.import_meta_graph('c:/liujh16.ckpt.meta') # 将模型定义的graph加载进来 #saver.restore(sess, os.path.join(path, 'model.ckpt-1000')) # #使用Restore的模型 sess = tf.Session() saver.restore(sess, tf.train.latest_checkpoint('c:/')) #加载指定文件夹下最近保存的一个模型的数据 m = tf.get_default_graph().get_tensor_by_name('mydata_m:0') b = tf.get_default_graph().get_tensor_by_name('mydata_b:0') print(m, b) #print(sess.run([m, b])) print(sess.run(m)) print("this is from model saved!") print(sess.run(b)) stopm = tf.stop_gradient('mydata_m:0') #将模型的一部分进行冻结 #vs = [v for v in tf.trainable_variables()] #vs = [v for v in tf.global_variables()] #for v in vs: # print(v) # print(v.name) # print(sess.run(v)) #ops = [o for o in sess.graph.get_operations()] #for o in ops: # print(o.name) sess.close() # -
save_restore_tf_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import numpy as np import pyclesperanto_prototype as cle cle.get_device() # - # Our starting point is a label image and another label image, where some of the labels in the first image are selected from. label_image = cle.artificial_tissue_2d() cle.imshow(label_image, labels=True) random_vector = np.random.random((1, int(label_image.max() + 1))) sparse_labels = cle.exclude_labels_with_values_out_of_range(random_vector, label_image, minimum_value_range=0, maximum_value_range=0.3) cle.imshow(sparse_labels, labels=True) # We now count for every label in `label_image`, how many labels are proximal to it in the `sparse_labels` image. For measuring the distance, we use the centroid distance. count_map = cle.proximal_other_labels_count_map(label_image, sparse_labels, maximum_distance=25) cle.imshow(count_map)
demo/neighbors/other_proximal_labels_count.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="https://cybersecurity-excellence-awards.com/wp-content/uploads/2017/06/366812.png"> # <h1><center>Darwin Supervised Classification Model Building </center></h1> # Prior to getting started, there are a few things you want to do: # 1. Set the dataset path. # 2. Enter your username and password to ensure that you're able to log in successfully # # Once you're up and running, here are a few things to be mindful of: # 1. For every run, look up the job status (i.e. requested, failed, running, completed) and wait for job to complete before proceeding. # 2. If you're not satisfied with your model and think that Darwin can do better by exploring a larger search space, use the resume function. # ## Import libraries # + # Import necessary libraries import warnings warnings.filterwarnings("ignore", message="numpy.dtype size changed") # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd from IPython.display import Image from time import sleep import os import numpy as np from sklearn.metrics import classification_report from sklearn.utils import resample from amb_sdk.sdk import DarwinSdk # - # ## Setup # **Login to Darwin**<br> # Enter your registered username and password below to login to Darwin. # + # Login ds = DarwinSdk() ds.set_url('https://amb-demo-api.sparkcognition.com/v1/') status, msg = ds.auth_login_user('<EMAIL>', 'aDeuVK8S2v') if not status: print(msg) # - # **Data Path** <br> # In the cell below, set the path to your dataset, the default is Darwin's example datasets path = '../../sets/' # ## Data Upload and Clean # **Read dataset and view a file snippet** # After setting up the dataset path, the next step is to upload the dataset from your local device to the server. 
<br> In the cell below, you need to specify the dataset_name if you want to use your own data. dataset_name = 'pha_danger_upsampled.csv' df = pd.read_csv(os.path.join(path, dataset_name)) ''' We realized from the previous feature engineering that there was a huge class imbalance. The below code helps fix the class imbalance. df.danger.value_counts() # told us there were 1481 records classified as yes and 18 records classified as no -- huge class imbalance # Separate majority and minority class df_minority = df[df.danger == 'No'] df_majority = df[df.danger == 'Yes'] # Upsample minority class df_minority_upsampled = resample(df_minority, replace=True, n_samples=1481, random_state=123) # Combine majority class with upsampled minority class df_upsampled = pd.concat([df_majority, df_minority_upsampled]) df_upsampled.danger.value_counts() # shows that both have records yes and no of quantity 1481 -- perfect! df_upsampled.to_csv('../../sets/pha_danger_upsampled.csv') # only needs to be done once, to upload the dataset ''' df.head() # **Upload dataset to Darwin** # Upload dataset ds.delete_dataset(dataset_name) status, dataset = ds.upload_dataset(os.path.join(path, dataset_name)) if not status: print(dataset) # **Clean dataset** # + # clean dataset target = "danger" status, job_id = ds.clean_data(dataset_name, target = target) if status: ds.wait_for_job(job_id['job_name']) else: print(job_id) # - # ## Create and Train Model # We will now build a model that will learn the class labels in the target column.<br> In the default cancer dataset, the target column is "Diagnosis". <br> You will have to specify your own target name for your custom dataset. <br> You can also increase max_train_time for longer training. 
# model = target + "_model0" ds.delete_model(model) status, job_id = ds.create_model(dataset_names = dataset_name, \ model_name = model, \ max_train_time = '00:02') if status: ds.wait_for_job(job_id['job_name']) else: print(job_id) # ## Extra Training (Optional) # Run the following cell for extra training, no need to specify parameters # + # Train some more status, job_id = ds.resume_training_model(dataset_names = dataset_name, model_name = model, max_train_time = '00:05') if status: ds.wait_for_job(job_id['job_name']) else: print(job_id) # - # ## Analyze Model # Analyze model provides feature importance ranked by the model. <br> It indicates a general view of which features pose a bigger impact on the model # Retrieve feature importance of built model status, artifact = ds.analyze_model(model) sleep(1) if status: ds.wait_for_job(artifact['job_name']) else: print(artifact) status, feature_importance = ds.download_artifact(artifact['artifact_name']) # Show the 10 most important features of the model. feature_importance[:] # ## Predictions # **Perform model prediction on the the training dataset.** status, artifact = ds.run_model(dataset_name, model) sleep(1) ds.wait_for_job(artifact['job_name']) # Download predictions from Darwin's server. 
status, prediction = ds.download_artifact(artifact['artifact_name']) prediction.head() # Create plots comparing predictions with actual target unq = prediction[target].unique()[::-1] p = np.zeros((len(prediction),)) a = np.zeros((len(prediction),)) for i,q in enumerate(unq): p += i*(prediction[target] == q).values a += i*(df[target] == q).values #Plot predictions vs actual plt.plot(a) plt.plot(p) plt.legend(['Actual','Predicted']) plt.yticks([i for i in range(len(unq))],[q for q in unq]); print(classification_report(df[target], prediction[target])) # **Perform model prediction on a test dataset that wasn't used in training.** <br> # Upload test dataset test_data = 'pha_danger_test.csv' ds.delete_dataset(test_data) status, dataset = ds.upload_dataset(os.path.join(path, test_data)) if not status: print(dataset) # clean test data # + # clean test dataset status, job_id = ds.clean_data(test_data, target = target, model_name = model) if status: ds.wait_for_job(job_id['job_name']) else: print(job_id) # - # Run model on test dataset. 
status, artifact = ds.run_model(test_data, model)
sleep(1)
ds.wait_for_job(artifact['job_name'])

# Create plots comparing predictions with actual target

# Create plots comparing predictions with actual target
status, prediction = ds.download_artifact(artifact['artifact_name'])
df = pd.read_csv(os.path.join(path,test_data))
unq = prediction[target].unique()[::-1]
p = np.zeros((len(prediction),))
a = np.zeros((len(prediction),))
for i,q in enumerate(unq):
    p += i*(prediction[target] == q).values
    a += i*(df[target] == q).values
#Plot predictions vs actual
plt.plot(a)
plt.plot(p)
plt.legend(['Actual','Predicted'])
plt.yticks([i for i in range(len(unq))],[q for q in unq]);

print(classification_report(df[target], prediction[target]))

# ## Find out which machine learning model Darwin used:

status, model_type = ds.lookup_model_name(model)
print(model_type['description']['best_genome'])

# ### Outcome
# It seems that the model we built ended up using a LinearGene. This isn't one of the models that we had predicted.
# #### Did this solve the problem?
# Overall, we believe that we did solve the problem. This is a system that can help predict the danger of new near-earth objects. We reduced our overfitting issue and additionally improved on the baseline accuracy. The accuracy is not suspiciously high, which suggests the model is not simply memorizing the training data. It would benefit from testing and training on more non-dangerous records. The macro average f-score seems to be pretty good, so we will take that as a good measure of success. More work can be done to reduce overfitting, but it requires more data.
examples/Trial/Asteroid Classification (Danger-UpSampled).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import transforms, datasets, utils import matplotlib.pyplot as plt # %matplotlib inline # - mnist_data = datasets.MNIST(root='.', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])) mnist_data_loader = torch.utils.data.DataLoader(mnist_data, batch_size=128, drop_last=True, shuffle=True) # + class DiscriminatorNet(torch.nn.Module): """ A three hidden-layer discriminative neural network """ def __init__(self): super(DiscriminatorNet, self).__init__() n_features = 784 n_out = 1 self.hidden0 = nn.Sequential( nn.Linear(n_features, 1024), nn.LeakyReLU(0.2), nn.Dropout(0.3) ) self.hidden1 = nn.Sequential( nn.Linear(1024, 512), nn.LeakyReLU(0.2), nn.Dropout(0.3) ) self.hidden2 = nn.Sequential( nn.Linear(512, 256), nn.LeakyReLU(0.2), nn.Dropout(0.3) ) self.out = nn.Sequential( torch.nn.Linear(256, n_out), torch.nn.Sigmoid() ) def forward(self, x): x = self.hidden0(x) x = self.hidden1(x) x = self.hidden2(x) x = self.out(x) return x discriminator = DiscriminatorNet() # + class GeneratorNet(torch.nn.Module): """ A three hidden-layer generative neural network """ def __init__(self): super(GeneratorNet, self).__init__() n_features = 100 n_out = 784 self.hidden0 = nn.Sequential( nn.Linear(n_features, 256), nn.LeakyReLU(0.2) ) self.hidden1 = nn.Sequential( nn.Linear(256, 512), nn.LeakyReLU(0.2) ) self.hidden2 = nn.Sequential( nn.Linear(512, 1024), nn.LeakyReLU(0.2) ) self.out = nn.Sequential( nn.Linear(1024, n_out), nn.Tanh() ) def forward(self, x): x = self.hidden0(x) x = self.hidden1(x) x = self.hidden2(x) x = self.out(x) return x generator = GeneratorNet() # - d_optimizer = 
optim.Adam(discriminator.parameters(), lr=2e-4) g_optimizer = optim.Adam(generator.parameters(), lr=2e-4) def train_discriminator(discriminator, optimizer, real_data, fake_data): input_data = torch.cat([real_data, fake_data], dim=0) targets = torch.cat([torch.ones(real_data.size(0), 1), torch.zeros(fake_data.size(0), 1)], dim=0) preds = discriminator(input_data) loss = F.binary_cross_entropy(preds, targets) optimizer.zero_grad() loss.backward() optimizer.step() return loss.item() def train_generator(optimizer, fake_data): preds = discriminator(fake_data) loss = F.binary_cross_entropy(preds, torch.ones(fake_data.size(0), 1)) optimizer.zero_grad() loss.backward() optimizer.step() return loss.item() # + n_epochs = 200 n_test_samples = 16 test_noise = torch.randn(n_test_samples, 100) n_batches = len(mnist_data_loader) avg_d_losses = [] avg_g_losses = [] for i_epoch in range(n_epochs): running_d_loss = 0. running_g_loss = 0. for n_batch, (real_batch,_) in enumerate(mnist_data_loader): real_data = real_batch.view(-1, 784) fake_data = generator(torch.randn(real_data.size(0), 100)) # Train D running_d_loss += train_discriminator(discriminator, d_optimizer, real_data, fake_data.detach()) # Train G running_g_loss += train_generator(g_optimizer, fake_data) avg_d_losses.append(running_d_loss / n_batches) avg_g_losses.append(running_g_loss / n_batches) if i_epoch == 0 or (i_epoch + 1) % 10 == 0: with torch.no_grad(): test_images = generator(test_noise).view(-1, 1, 28, 28) utils.save_image(test_images, "test_samples/epoch_{:d}.png".format(i_epoch), nrow=8, normalize=True, range=(-1, 1)) print("Epoch {:d}".format(i_epoch)) print("=====================") print(" Discriminator Loss: {:f}".format(avg_d_losses[-1])) print(" Generator Loss: {:f}".format(avg_g_losses[-1])) print("---------------------") print() plt.imshow(utils.make_grid(test_images, nrow=8, normalize=True, range=(-1, 1)).permute(1, 2, 0)); # - # ---
SimpleGANs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: pyspark
#     language: python
#     name: pyspark
# ---

# # Data Generator
#
# Generating more realistic data

# +
# helper functions
from datagen_helper import generate_pd_df
import pandas as pd

# +
# Generate a small batch of synthetic transactions and persist it as CSV.
sample_df = generate_pd_df(20)
sample_df.to_csv('transactions-sample.csv', sep=',', index=False)
print(f'wrote to "transactions-sample.csv"')
sample_df.head(10)
# -

# ## Create a Spark DataFrame

# +
## Init Spark
import os, sys

# Make the parent directory importable so init_spark can be found.
notebook_dir = os.path.abspath('')
parent_dir = os.path.dirname(notebook_dir)
grand_parent_dir = os.path.dirname(parent_dir)
if parent_dir not in sys.path:
    sys.path.append(parent_dir)

from init_spark import init_spark
spark = init_spark()

# +
import pyspark
from pyspark.sql.types import ArrayType, IntegerType, LongType, StringType, FloatType, TimestampType, StructType, StructField

# Every column is a nullable string except the two monetary amounts.
_amount_columns = {"amount_customer", "amount_merchant"}
_column_names = [
    "id", "timestamp", "mti", "card_number", "amount_customer",
    "merchant_type", "merchant_id", "merchant_address", "ref_id",
    "amount_merchant", "response_code",
]
my_schema = StructType([
    StructField(name, FloatType() if name in _amount_columns else StringType(), True)
    for name in _column_names
])

spark_df = spark.createDataFrame(sample_df, schema=my_schema)
spark_df.show(truncate=True)
# -
data-generator/datagen-tx-small-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={} import submitter # + [markdown] pycharm={"metadata": false} # Now we submit a script that we want the grader/tester to run. # The boolean tells the spawner service if it should force the run. Otherwise it will return the results of the previous run. # This is handled by creating a hash of the .py file that is created from the .ipynb, and storing the results using the hash # as the key. # + pycharm={"metadata": false, "name": "#%%\n"} submitter.submit("./project.ipynb", "assignment1", True) # + [markdown] pycharm={"metadata": false} # The above should return the stdout of the file submitted. The output should look like: # ``` # http://jupyter-spawner.jhub.svc.cluster.local:80/notebook/run?uid=dd&adr=10.44.4.70&prt=9999&frc=True # serving at port 9999 # 10.44.2.67 - - [07/Apr/2019 20:12:34] "POST / HTTP/1.1" 200 - # hello world # ``` # The response returned by the function is a dictionary that was returned by the spawner service. # #
jhub-spawner-client/examples/submit_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="FWbqi0Quujrn" colab={"base_uri": "https://localhost:8080/"} outputId="143e52c2-8989-4ab0-cee1-1cd0fd2fc308" import glob import os import librosa import numpy as np # !pip install pretty_midi import pretty_midi # + id="LWcVnJR1uogZ" colab={"base_uri": "https://localhost:8080/"} outputId="916772cc-3542-4f23-be88-05c6b201d396" from google.colab import drive drive.mount('/content/drive', force_remount=True) # + [markdown] id="0vfbWiK_vFbG" # **Please update the start path and destination path** # + id="jtYB_BbXuvdV" start ='/content/drive/MyDrive/MUS' # Divide all 9 directories of the MAPS Dataset into train/test/val and provide its path (format eg. test/(ENS..)/MUS/) dest = '/content/drive/MyDrive/finale' # Destination Path for storing the .npy files # + id="i7SJ6kSvu5dd" RangeMIDInotes=[21,108] n_fft = 2048 sr=16000. 
n_mels = 252
bins_per_octave = 36
n_octave = 7
val_rate = 1. / 7
pretty_midi.pretty_midi.MAX_TICK = 1e10
n_bins = n_octave * bins_per_octave
hop_length = 512
win_width = 32   # frames per training window
kernel_size = 7  # conv kernel height; overlapping windows carry kernel_size-1 context frames
overlap = True


# + id="-0FBIxyOu8kU"
def midi2mat(midi_path_train, length, mel_len, sr, RangeMIDInotes=RangeMIDInotes):
    """Return the binary ground-truth piano roll for one MIDI file.

    The roll is sampled at mel_len * sr / length frames per second so it is
    frame-aligned with a spectrogram of `mel_len` frames computed from an
    audio signal of `length` samples, and restricted to the MIDI note range
    RangeMIDInotes (inclusive). Shape: (n_notes, mel_len), dtype bool.
    """
    midi_data = pretty_midi.PrettyMIDI(midi_path_train)
    pianoRoll = midi_data.instruments[0].get_piano_roll(fs=mel_len * sr / length)
    Ground_truth_mat = (pianoRoll[RangeMIDInotes[0]:RangeMIDInotes[1] + 1, :mel_len] > 0)
    return Ground_truth_mat


def cut_windows(matrix):
    """Split a (frames, features) matrix into win_width-frame windows.

    With overlap=True the matrix is zero-padded by kernel_size//2 frames at
    both ends and each window carries kernel_size - 1 extra context frames,
    matching the 'valid' convolution of the downstream CNN.
    Returns an array of shape (n_windows, window_len, features).
    """
    nb_win = matrix.shape[0] // win_width
    if not overlap:
        return np.asarray([matrix[i * win_width:(i + 1) * win_width, :]
                           for i in range(nb_win)])
    w = matrix.shape[1]
    pad = np.zeros([kernel_size // 2, w])
    padded = np.concatenate([pad, matrix, pad], axis=0)
    return np.asarray([padded[i * win_width:(i + 1) * win_width + kernel_size - 1, :]
                       for i in range(nb_win)])


# + id="HARM4s-Mu9-U"
# For each instrument directory: pair every .wav/.mid twin, compute the
# log-mel spectrogram (inputs, X) and the aligned piano roll (labels, Y),
# cut both into fixed-size windows and save one .npy pair per directory.
for direc in os.listdir(start):
    j = 0  # number of .wav files joined into X
    k = 0  # number of .mid files joined into Y
    startpath = os.path.join(start, direc)
    destpath = os.path.join(dest, direc)
    if not os.path.exists(destpath):
        os.makedirs(destpath)
    print(direc)
    for f1 in os.listdir(startpath):
        file_name, file_extensions = os.path.splitext(f1)
        if file_extensions == '.txt':
            continue
        if file_extensions == ".wav":
            no = j
        elif file_extensions == ".mid":
            no = k
        else:
            # Anything that is neither audio nor MIDI previously fell through
            # and crashed with UnboundLocalError further down; skip it.
            continue
        # Each entry needs both the audio and the MIDI file of the recording.
        wav_file = os.path.join(startpath, file_name + '.wav')
        midi_file = os.path.join(startpath, file_name + '.mid')
        x, sr = librosa.load(wav_file, sr=sr)
        mel_spectrogram = librosa.feature.melspectrogram(x, sr=sr, n_fft=n_fft,
                                                         hop_length=hop_length,
                                                         n_mels=n_mels)
        # (frames, n_mels), log-scaled. (The previous linear-magnitude
        # assignment of `mel` was dead code and has been removed.)
        mel = np.transpose(librosa.power_to_db(np.abs(mel_spectrogram)))
        Ground_truth_mat = midi2mat(midi_file, len(x), mel.shape[0], sr,
                                    RangeMIDInotes=RangeMIDInotes)
        midi_train = np.transpose(Ground_truth_mat)
        # The MIDI may be shorter than the audio; trim the spectrogram to match.
        if midi_train.shape[0] < mel.shape[0]:
            mel = mel[:midi_train.shape[0], :]
        # Window-cut directly in memory (the original round-tripped each array
        # through a temporary .npy file that it immediately deleted again).
        if file_extensions == ".wav":
            cut_matrix = cut_windows(mel)
            X = cut_matrix if j == 0 else np.concatenate((X, cut_matrix), axis=0)
            j += 1
        else:  # ".mid"
            cut_matrix = cut_windows(midi_train)
            Y = cut_matrix if k == 0 else np.concatenate((Y, cut_matrix), axis=0)
            k += 1
        print('Joined ', f1, "no ", no)
    # Save the per-directory tensors with an explicit channel axis:
    # (n_windows, window_len, 1, features).
    # FIX: the original additionally did `X_train = np.expand_dims(X_train, ...)`
    # (and the same for Y_train) on names that were never defined, which raised
    # a NameError here before anything could be written. The guards also avoid
    # a NameError on X/Y when a directory contains no processable files.
    if j > 0:
        X = np.expand_dims(X, axis=-2)
        opath1 = os.path.join(destpath, "X_final_MEL_") + direc + '.npy'
        np.save(opath1, X)
    if k > 0:
        Y = np.expand_dims(Y, axis=-2)
        opath2 = os.path.join(destpath, "Y_final_MEL_") + direc + '.npy'
        np.save(opath2, Y)
MODEL -2 (CNN)/MEL_CNN_PreprocessingCode_Model2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Using TLS with measurement uncertainties # # TLS is capable of using measurement uncertainties, when available, in its least-squares fit. # # Every measured data point has its own measurement uncertainty ("error"). # # - We here neglect uncertainties in time, as the time stamp of typical observations has millisecond accuracy. For 30min cadences, this error is then $\approx 10^{-7}$. # # - In terms of flux, however, even a perfect photometer will produce some noise. Fundamentally, this originates from the fact that photons (starlight) and electrons (detector) are quantized [(Einstein 1905)](https://ui.adsabs.harvard.edu/#abs/1905AnP...322..132E/abstract), so that only a finite number can be counted in a given time. This phenomenon is the shot noise [(Schottky 1918)](https://ui.adsabs.harvard.edu/#abs/1918AnP...362..541S/abstract), which is correlated with the brightness of the target. In practice, other noise sources add to this (e.g., instrumental imperfections and stellar variations). These sources are typically of order 10-100 ppm per 30 min cadence or more. # # If all measurement uncertainties are equal (or can not be distinguished), no further treatment is required. # # If information about non-uniform measurement uncertainties are available, they should be included in the TLS fit. We here demonstrate their positive effect, using exaggerated noise fluctuations for clarity. In practice, fluctuations will often be less severe, so that the effect of including the estimates will be smaller. 
# # We start by creating synthetic data with uniform noise, and without measurement uncertainties: # + import numpy import batman # Create test data numpy.random.seed(seed=0) # reproducibility start = 48 days = 365.25 * 3 samples_per_day = 12 samples = int(days * samples_per_day) t = numpy.linspace(start, start + days, samples) # Use batman to create transits ma = batman.TransitParams() ma.t0 = (start + 20) # time of inferior conjunction; first transit is X days after start ma.per = 365.25 # orbital period ma.rp = 6371 / 696342 # planet radius (in units of stellar radii) ma.a = 217 # semi-major axis (in units of stellar radii) ma.inc = 90 # orbital inclination (in degrees) ma.ecc = 0 # eccentricity ma.w = 90 # longitude of periastron (in degrees) ma.u = [0.5] # limb darkening coefficients ma.limb_dark = "linear" # limb darkening model m = batman.TransitModel(ma, t) # initializes model original_flux = m.light_curve(ma) # calculates light curve # Create noise and merge with flux ppm = 5 stdev = 10 ** -6 * ppm noise = numpy.random.normal(0, stdev, int(samples)) y = original_flux + noise # - # We plot the synthetic data which has uniform white noise and 3 transits: import matplotlib.pyplot as plt from matplotlib import rcParams; rcParams["figure.dpi"] = 150 plt.plot(t, y); # Now, we run TLS and examine the signal detection efficiency: from transitleastsquares import transitleastsquares model = transitleastsquares(t, y) results = model.power( period_min=300, period_max=400, oversampling_factor=3, duration_grid_step=1.05, T0_fit_margin=0.2 ) print('SDE with uniform noise', format(results.SDE, '.5f')) plt.figure() ax = plt.gca() ax.axvline(results.period, alpha=0.4, lw=3) plt.xlim(numpy.min(results.periods), numpy.max(results.periods)) for n in range(2, 10): ax.axvline(n*results.period, alpha=0.4, lw=1, linestyle="dashed") ax.axvline(results.period / n, alpha=0.4, lw=1, linestyle="dashed") plt.ylabel(r'SDE') plt.xlabel('Period (days)') plt.plot(results.periods, 
results.power, color='black', lw=0.5) plt.text(results.period + 1, results.SDE * 0.9, 'SDE=' + format(results.SDE, '.1f')) plt.text(results.period + 1, results.SDE * 0.8, 'Uniform noise') plt.xlim(300, 400); # Next, we modify the noise levels so that there is much more noise towards the end of the time series, where no transit occurs: # + noise = numpy.random.normal(0, 10*stdev, 3149) y[10000:] = y[10000:] + noise dy = numpy.full(len(y),stdev) dy[10000:] = 10*stdev plt.plot(t, y); # - # We re-run TLS with identical settings, i.e.: not accounting for non-uniform noise: # + from transitleastsquares import transitleastsquares model = transitleastsquares(t, y) results = model.power( period_min=300, period_max=400, oversampling_factor=3, duration_grid_step=1.05, T0_fit_margin=0.2 ) print('SDE non uniform noise, no uncertainties used: ', format(results.SDE, '.5f')) plt.figure() ax = plt.gca() ax.axvline(results.period, alpha=0.4, lw=3) plt.xlim(numpy.min(results.periods), numpy.max(results.periods)) for n in range(2, 10): ax.axvline(n*results.period, alpha=0.4, lw=1, linestyle="dashed") ax.axvline(results.period / n, alpha=0.4, lw=1, linestyle="dashed") plt.ylabel(r'SDE') plt.xlabel('Period (days)') plt.plot(results.periods, results.power, color='black', lw=0.5) plt.text(results.period + 1, results.SDE * 0.9, 'SDE=' + format(results.SDE, '.1f')) plt.text(results.period + 1, results.SDE * 0.7, 'Non-uniform noise\nwithout uncertainties') plt.xlim(300, 400); # - # The signal detection efficiency has decreased from 17.3 to 14.4, and additional noise is visible towards the shorter periods in the SDE-ogram (to explain this is left as an exercise to the reader). 
# # Finally, we re-run TLS *with* correctly estimated measurement uncertainties: # + from transitleastsquares import transitleastsquares model = transitleastsquares(t, y, dy) # <== Here, the uncertainties are added results = model.power( period_min=300, period_max=400, oversampling_factor=3, duration_grid_step=1.05, T0_fit_margin=0.2 ) print('SDE non uniform noise, uncertainties used: ', format(results.SDE, '.5f')) plt.figure() ax = plt.gca() ax.axvline(results.period, alpha=0.4, lw=3) plt.xlim(numpy.min(results.periods), numpy.max(results.periods)) for n in range(2, 10): ax.axvline(n*results.period, alpha=0.4, lw=1, linestyle="dashed") ax.axvline(results.period / n, alpha=0.4, lw=1, linestyle="dashed") plt.ylabel(r'SDE') plt.xlabel('Period (days)') plt.plot(results.periods, results.power, color='black', lw=0.5) plt.text(results.period + 1, results.SDE * 0.9, 'SDE=' + format(results.SDE, '.1f')) plt.text(results.period + 1, results.SDE * 0.7, 'Non-uniform noise\nwith uncertainties') plt.xlim(300, 400); # - # To summarize, the results are: # - 17.3 SDE with uniform noise # - 14.4 SDE with non-uniform noise, searching with the uniform noise assumption # - 17.2 SDE with non-uniform noise, searching with correct uncertainties. # # Very clearly, the use of measurement uncertainties, where available, is beneficial.
tutorials/10 Measurement uncertainties.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/DKojen/Matrix_car/blob/master/day4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="4Rn309XYTzwn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 535} outputId="04d33846-fc1b-40e5-dc20-cf6d11ee47bf" # !pip install --upgrade tables # !pip install eli5 # !pip install xgboost # + id="Rub-xSidXleo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 190} outputId="5fc76c49-5d20-4c2f-d909-b3b49b11e183" import pandas as pd import numpy as np from sklearn.dummy import DummyRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor import xgboost as xgb from sklearn.metrics import mean_absolute_error as mae from sklearn.model_selection import cross_val_score import eli5 from eli5.sklearn import PermutationImportance # + id="4zWiw_DLYARj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="0bcda564-93d8-472a-d37d-250943a05771" # cd "/content/drive/My Drive/Colab Notebooks/Matrix/matrix_two/Matrix_car" # + id="ZcyjFiY2YF-Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="6152ca21-280c-4ffd-ec48-d61cd47153ce" df = pd.read_hdf('data/car.h5') df.shape # + id="z519XzHbYK4Q" colab_type="code" colab={} SUFFIX_CAT = '__cat' for feat in df.columns: if isinstance(df[feat][0], list): continue factorized_values = df[feat].factorize()[0] if SUFFIX_CAT in feat: df[feat] = factorized_values else: df[feat + SUFFIX_CAT] = factorized_values # + id="k-rFmx23Ybn5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} 
# outputId="d9c785fa-5c5b-4745-d01c-11255313ae4d"
# Candidate features: every factorized "__cat" column, minus anything
# price-related so the target cannot leak into the inputs.
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
len(cat_feats)


# + id="fRqcfA1RYfyn" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate `model` on df[feats] against the price target.

    Returns (mean, std) of 3-fold neg-MAE scores; values closer to 0 are better.
    """
    X = df[feats].values
    y = df['price_value'].values
    scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)


# + [markdown] id="FgEdmkXEZPBu" colab_type="text"
# ## DecisionTree

# + id="25n1X0wnY6Jy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="6d59ceae-92dd-4229-d8e1-b0494784be1c"
run_model(DecisionTreeRegressor(max_depth=5), cat_feats)

# + [markdown] id="DqjbIpmgZVkf" colab_type="text"
# ## RandomForest

# + id="hrMMPWr_ZYG-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5df17c51-9dcd-433f-93f6-04ca7cbfdff5"
model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model(model, cat_feats)

# + [markdown] id="QRveTqFyZq3H" colab_type="text"
# ## XGBoost

# + id="BFbFBUhYZs38" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="4431a788-b835-4a79-ac52-c99bf2ce6040"
xgb_params = {
    'max_depth': 5,
    'n_estimators': 50,
    'learning_rate': 0.1,
    'seed': 0,
}
run_model(xgb.XGBRegressor(**xgb_params), cat_feats)

# + id="xiL6xtV1al60" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 430} outputId="106b07cc-2c11-423d-dcd5-36cdccc1623e"
# BUG FIX: X and y previously existed only inside run_model's local scope, so
# `m.fit(X, y)` raised NameError here. Build them at notebook scope first,
# using the same feature matrix / target that run_model uses.
X = df[cat_feats].values
y = df['price_value'].values

m = xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0)
m.fit(X, y)

# Permutation importance: shuffle each feature and measure the score drop.
imp = PermutationImportance(m, random_state=0).fit(X, y)
eli5.show_weights(imp, feature_names=cat_feats)

# + id="rlNlrk6Lctfd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="dcf5e83d-e827-4e60-ed6d-3788711a78f5"
# Production year: replace missing ('None') with -1 sentinel, keep as int so
# the model sees its natural ordering instead of a factorized code.
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))

feats = ['param_napęd__cat',
         'param_rok-produkcji__cat',
         'param_stan__cat',
         'param_skrzynia-biegów__cat',
         'param_faktura-vat__cat',
         'param_moc__cat',
         'param_marka-pojazdu__cat',
         'feature_kamera-cofania__cat',
         'param_typ__cat',
         'param_pojemność-skokowa__cat',
         'seller_name__cat',
         'feature_wspomaganie-kierownicy__cat',
         'param_model-pojazdu__cat',
         'param_wersja__cat',
         'param_kod-silnika__cat',
         'feature_system-start-stop__cat',
         'feature_asystent-pasa-ruchu__cat',
         'feature_czujniki-parkowania-przednie__cat',
         'feature_łopatki-zmiany-biegów__cat',
         'feature_regulowane-zawieszenie__cat']
run_model(xgb.XGBRegressor(**xgb_params), feats)

# + id="ctDUAvKdeNST" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="f9f201bf-96f7-47a5-87ba-d3d3df3507aa"
# Engine power comes as e.g. "150 KM": take the leading number, -1 for missing.
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
# Idempotent re-run of the year mapping (values are already ints after the
# first pass; int(x) on an int is a no-op).
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))

# Same feature set, but year and power as raw numbers instead of "__cat" codes.
feats = ['param_napęd__cat',
         'param_rok-produkcji',
         'param_stan__cat',
         'param_skrzynia-biegów__cat',
         'param_faktura-vat__cat',
         'param_moc',
         'param_marka-pojazdu__cat',
         'feature_kamera-cofania__cat',
         'param_typ__cat',
         'param_pojemność-skokowa__cat',
         'seller_name__cat',
         'feature_wspomaganie-kierownicy__cat',
         'param_model-pojazdu__cat',
         'param_wersja__cat',
         'param_kod-silnika__cat',
         'feature_system-start-stop__cat',
         'feature_asystent-pasa-ruchu__cat',
         'feature_czujniki-parkowania-przednie__cat',
         'feature_łopatki-zmiany-biegów__cat',
         'feature_regulowane-zawieszenie__cat']
run_model(xgb.XGBRegressor(**xgb_params), feats)

# + id="UfVs75bYPRo3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="706747be-922b-4cef-8288-c0af5ae4a233"
# Engine displacement comes as e.g. "1 998 cm3": strip the unit and the
# thousands spaces, -1 for missing.
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ', '')))

# Displacement now raw as well.
feats = ['param_napęd__cat',
         'param_rok-produkcji',
         'param_stan__cat',
         'param_skrzynia-biegów__cat',
         'param_faktura-vat__cat',
         'param_moc',
         'param_marka-pojazdu__cat',
         'feature_kamera-cofania__cat',
         'param_typ__cat',
         'param_pojemność-skokowa',
         'seller_name__cat',
         'feature_wspomaganie-kierownicy__cat',
         'param_model-pojazdu__cat',
         'param_wersja__cat',
         'param_kod-silnika__cat',
         'feature_system-start-stop__cat',
         'feature_asystent-pasa-ruchu__cat',
         'feature_czujniki-parkowania-przednie__cat',
         'feature_łopatki-zmiany-biegów__cat',
         'feature_regulowane-zawieszenie__cat']
run_model(xgb.XGBRegressor(**xgb_params), feats)
day4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="Ic1reB4s6vu1" # # The Autodiff Cookbook # # *alexbw@, mattjj@* # # JAX has a pretty general automatic differentiation system. In this notebook, we'll go through a whole bunch of neat autodiff ideas that you can cherry pick for your own work, starting with the basics. # + colab_type="code" id="JTYyZkSO6vuy" outputId="08abe649-dcdd-4638-f117-f8b82f675694" colab={"base_uri": "https://localhost:8080/", "height": 51} import jax.numpy as np from jax import grad, jit, vmap from jax import random key = random.PRNGKey(0) # + [markdown] colab_type="text" id="YxnjtAGN6vu2" # ## Gradients # + [markdown] colab_type="text" id="zqwpfr2vAsvt" # ### Starting with `grad` # # You can differentiate a function with `grad`: # + colab_type="code" id="0NLO4Wfknzmk" outputId="f9ab1433-9a9c-4912-ce10-090847fa7eb9" colab={"base_uri": "https://localhost:8080/", "height": 34} grad_tanh = grad(np.tanh) print(grad_tanh(2.0)) # + [markdown] colab_type="text" id="LGcNfDntoBZI" # `grad` takes a function and returns a function. If you have a Python function `f` that evaluates the mathematical function $f$, then `grad(f)` is a Python function that evaluates the mathematical function $\nabla f$. That means `grad(f)(x)` represents the value $\nabla f(x)$. # # Since `grad` operates on functions, you can apply it to its own output to differentiate as many times as you like: # + colab_type="code" id="RDGk1GDsoawu" outputId="5e382097-68b4-40aa-cae0-5968ac9d7d35" colab={"base_uri": "https://localhost:8080/", "height": 51} print(grad(grad(np.tanh))(2.0)) print(grad(grad(grad(np.tanh)))(2.0)) # + [markdown] colab_type="text" id="2rcnpTiinqi8" # Let's look at computing gradients with `grad` in a linear logistic regression model. 
# First, the setup:

# + colab_type="code" id="27TcOT2i6vu5" colab={}
def sigmoid(x):
    """Numerically friendly logistic sigmoid via tanh: 1/(1+exp(-x))."""
    return 0.5 * (np.tanh(x / 2) + 1)

# Outputs probability of a label being true.
def predict(W, b, inputs):
    return sigmoid(np.dot(inputs, W) + b)

# Build a toy dataset: 4 examples x 3 features, with boolean labels.
inputs = np.array([[0.52, 1.12, 0.77],
                   [0.88, -1.08, 0.15],
                   [0.52, 0.06, -1.30],
                   [0.74, -2.49, 1.39]])
targets = np.array([True, True, False, True])

# Training loss is the negative log-likelihood of the training examples.
def loss(W, b):
    preds = predict(W, b, inputs)
    # Probability assigned to the true label of each example.
    label_probs = preds * targets + (1 - preds) * (1 - targets)
    return -np.sum(np.log(label_probs))

# Initialize random model coefficients
key, W_key, b_key = random.split(key, 3)
W = random.normal(W_key, (3,))
b = random.normal(b_key, ())

# + [markdown] colab_type="text" id="8Wk-Yai7ooh1"
# Use the `grad` function with its `argnums` argument to differentiate a function with respect to positional arguments.

# + colab_type="code" id="bpmd8W8-6vu6" outputId="35ec5ba1-0e83-452c-e2f2-59436fd934e8" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Differentiate `loss` with respect to the first positional argument:
W_grad = grad(loss, argnums=0)(W, b)
print('W_grad', W_grad)

# Since argnums=0 is the default, this does the same thing:
W_grad = grad(loss)(W, b)
print('W_grad', W_grad)

# But we can choose different values too, and drop the keyword:
b_grad = grad(loss, 1)(W, b)
print('b_grad', b_grad)

# Including tuple values
W_grad, b_grad = grad(loss, (0, 1))(W, b)
print('W_grad', W_grad)
print('b_grad', b_grad)

# + [markdown] colab_type="text" id="MDl5UZl4oyzB"
# This `grad` API has a direct correspondence to the excellent notation in Spivak's classic *Calculus on Manifolds* (1965), also used in Sussman and Wisdom's [*Structure and Interpretation of Classical Mechanics*](http://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html) (2015) and their [*Functional Differential
# Geometry*](https://mitpress.mit.edu/books/functional-differential-geometry) (2013). Both books are open-access. See in particular the "Prologue" section of *Functional Differential Geometry* for a defense of this notation.
#
# Essentially, when using the `argnums` argument, if `f` is a Python function for evaluating the mathematical function $f$, then the Python expression `grad(f, i)` evaluates to a Python function for evaluating $\partial_i f$.

# + [markdown] colab_type="text" id="fuz9E2vzro5E"
# ### Differentiating with respect to nested lists, tuples, and dicts

# + [markdown] colab_type="text" id="QQaPja7puMKi"
# Differentiating with respect to standard Python containers just works, so use tuples, lists, and dicts (and arbitrary nesting) however you like.

# + colab_type="code" id="IY82kdAe6vu_" outputId="52de2cef-d6f9-4662-d4e3-d9889777ca73" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Same loss as before, but parameters packed in a dict; the gradient comes
# back as a dict with the same structure (a "pytree").
def loss2(params_dict):
    preds = predict(params_dict['W'], params_dict['b'], inputs)
    label_probs = preds * targets + (1 - preds) * (1 - targets)
    return -np.sum(np.log(label_probs))

print(grad(loss2)({'W': W, 'b': b}))

# + [markdown] colab_type="text" id="cJ2NxiN58bfI"
# You can [register your own container types](https://github.com/google/jax/issues/446#issuecomment-467105048) to work with not just `grad` but all the JAX transformations (`jit`, `vmap`, etc.).
# + [markdown] colab_type="text" id="PaCHzAtGruBz"
# ### Evaluate a function and its gradient using `value_and_grad`

# + [markdown] colab_type="text" id="CSgCjjo-ssnA"
# Another convenient function is `value_and_grad` for efficiently computing both a function's value as well as its gradient's value:

# + colab_type="code" id="RsQSyT5p7OJW" outputId="54f66fc1-11cd-45eb-93e1-ac89e911de0d" colab={"base_uri": "https://localhost:8080/", "height": 51}
from jax import value_and_grad
# One pass returns both the loss and its gradient w.r.t. (W, b).
loss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b)
print('loss value', loss_value)
# Sanity check: recomputing the loss directly gives the same value.
print('loss value', loss(W, b))

# + [markdown] colab_type="text" id="rYTrH5tKllC_"
# ### Checking against numerical differences
#
# A great thing about derivatives is that they're straightforward to check with finite differences:

# + colab_type="code" id="R8q5RiY3l7Fw" outputId="583d8b1d-9883-4c91-a863-8284a70bb2e0" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Set a step size for finite differences calculations
eps = 1e-4

# Check b_grad with scalar finite differences (central difference)
b_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps
print('b_grad_numerical', b_grad_numerical)
print('b_grad_autodiff', grad(loss, 1)(W, b))

# Check W_grad with finite differences in a random direction
key, subkey = random.split(key)
vec = random.normal(subkey, W.shape)
unitvec = vec / np.sqrt(np.vdot(vec, vec))
W_grad_numerical = (loss(W + eps / 2. * unitvec, b) - loss(W - eps / 2. * unitvec, b)) / eps
print('W_dirderiv_numerical', W_grad_numerical)
print('W_dirderiv_autodiff', np.vdot(grad(loss)(W, b), unitvec))

# + [markdown] colab_type="text" id="InzB-iiJpVcx"
# JAX provides a simple convenience function that does essentially the same thing, but checks up to any order of differentiation that you like:

# + colab_type="code" id="6Ok2LEfQmOuy" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c9dde4f9-32c5-413a-cac2-16a72ba897d9"
from jax.test_util import check_grads
check_grads(loss, (W, b), order=2)  # check up to 2nd order derivatives

# + [markdown] colab_type="text" id="id0DXxwt3VJi"
# ### Hessian-vector products with `grad`-of-`grad`
#
# One thing we can do with higher-order `grad` is build a Hessian-vector product function. (Later on we'll write an even more efficient implementation that mixes both forward- and reverse-mode, but this one will use pure reverse-mode.)
#
# A Hessian-vector product function can be useful in a [truncated Newton Conjugate-Gradient algorithm](https://en.wikipedia.org/wiki/Truncated_Newton_method) for minimizing smooth convex functions, or for studying the curvature of neural network training objectives (e.g. [1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)).
#
# For a scalar-valued function $f : \mathbb{R}^n \to \mathbb{R}$ with continuous second derivatives (so that the Hessian matrix is symmetric), the Hessian at a point $x \in \mathbb{R}^n$ is written as $\partial^2 f(x)$. A Hessian-vector product function is then able to evaluate
#
# $\qquad v \mapsto \partial^2 f(x) \cdot v$
#
# for any $v \in \mathbb{R}^n$.
#
# The trick is not to instantiate the full Hessian matrix: if $n$ is large, perhaps in the millions or billions in the context of neural networks, then that might be impossible to store.
# Luckily, `grad` already gives us a way to write an efficient
# Hessian-vector product function. We just have to use the identity
#
# $\qquad \partial^2 f (x) v = \partial [x \mapsto \partial f(x) \cdot v] = \partial g(x)$,
#
# where $g(x) = \partial f(x) \cdot v$ is a new scalar-valued function that
# dots the gradient of $f$ at $x$ with the vector $v$. Notice that we only
# ever differentiate scalar-valued functions of vector-valued arguments,
# which is exactly where we know `grad` is efficient.
#
# In JAX code, that identity translates directly:

# + colab_type="code" id="Ou5OU-gU9epm" colab={}
def hvp(f, x, v):
    """Hessian-vector product of scalar-valued `f` at `x` with vector `v`."""
    # g(y) = <grad f(y), v> is scalar-valued, so one more reverse pass
    # through grad yields the Hessian applied to v.
    g = lambda y: np.vdot(grad(f)(y), v)
    return grad(g)(x)


# + [markdown] colab_type="text" id="Rb1-5Hpv-ZV0"
# The inner function closes over `v` lexically — JAX never gets perturbed or
# confused by closures like this.
#
# We'll check this implementation a few cells down, once we see how to compute
# dense Hessian matrices. We'll also write an even better version that uses
# both forward-mode and reverse-mode.
# + [markdown] colab_type="text" id="5A_akvtp8UTu" # ### Jacobians and Hessians using `jacfwd` and `jacrev` # + [markdown] colab_type="text" id="UP5BbmSm8ZwK" # You can compute full Jacobian matrices using the `jacfwd` and `jacrev` functions: # + colab_type="code" id="cbETzAvKvf5I" outputId="0a0c757f-6705-42d6-87c2-12c40ba83cc6" colab={"base_uri": "https://localhost:8080/", "height": 187} from jax import jacfwd, jacrev # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) J = jacfwd(f)(W) print("jacfwd result, with shape", J.shape) print(J) J = jacrev(f)(W) print("jacrev result, with shape", J.shape) print(J) # + [markdown] colab_type="text" id="iZDL-n_AvgBt" # These two functions compute the same values (up to machine numerics), but differ in their implementation: `jacfwd` uses forward-mode automatic differentiation, which is more efficient for "tall" Jacobian matrices, while `jacrev` uses reverse-mode, which is more efficient for "wide" Jacobian matrices. For matrices that are near-square, `jacfwd` probably has an edge over `jacrev`. # + [markdown] colab_type="text" id="zeKlr7Xz8bfm" # You can also use `jacfwd` and `jacrev` with container types: # + colab_type="code" id="eH46Xnm88bfm" outputId="236277f5-50f9-4ac1-8bd8-be3309c9d372" colab={"base_uri": "https://localhost:8080/", "height": 136} def predict_dict(params, inputs): return predict(params['W'], params['b'], inputs) J_dict = jacrev(predict_dict)({'W': W, 'b': b}, inputs) for k, v in J_dict.items(): print("Jacobian from {} to logits is".format(k)) print(v) # + [markdown] colab_type="text" id="yH34zjV88bfp" # For more details on forward- and reverse-mode, as well as how to implement `jacfwd` and `jacrev` as efficiently as possible, read on! 
# + [markdown] colab_type="text" id="K6Mpw_7K8bfp" # Using a composition of two of these functions gives us a way to compute dense Hessian matrices: # + colab_type="code" id="n155ypD9rfIZ" outputId="414711b8-37ba-4873-88c9-f6e34b604049" colab={"base_uri": "https://localhost:8080/", "height": 289} def hessian(f): return jacfwd(jacrev(f)) H = hessian(f)(W) print("hessian, with shape", H.shape) print(H) # + [markdown] colab_type="text" id="wvkk82R6uRoM" # This shape makes sense: if we start with a function $f : \mathbb{R}^n \to \mathbb{R}^m$, then at a point $x \in \mathbb{R}^n$ we expect to get the shapes # # * $f(x) \in \mathbb{R}^m$, the value of $f$ at $x$, # * $\partial f(x) \in \mathbb{R}^{m \times n}$, the Jacobian matrix at $x$, # * $\partial^2 f(x) \in \mathbb{R}^{m \times n \times n}$, the Hessian at $x$, # # and so on. # # To implement `hessian`, we could have used `jacfwd(jacrev(f))` or `jacrev(jacfwd(f))` or any other composition of the two. But forward-over-reverse is typically the most efficient. That's because in the inner Jacobian computation we're often differentiating a function wide Jacobian (maybe like a loss function $f : \mathbb{R}^n \to \mathbb{R}$), while in the outer Jacobian computation we're differentiating a function with a square Jacobian (since $\nabla f : \mathbb{R}^n \to \mathbb{R}^n$), which is where forward-mode wins out. # + [markdown] colab_type="text" id="OMmi9cyhs1bj" # ## How it's made: two foundational autodiff functions # + [markdown] colab_type="text" id="mtSRvouV6vvG" # ### Jacobian-Vector products (JVPs, aka forward-mode autodiff) # # JAX includes efficient and general implementations of both forward- and reverse-mode automatic differentiation. The familiar `grad` function is built on reverse-mode, but to explain the difference in the two modes, and when each can be useful, we need a bit of math background. 
# # #### JVPs in math # # Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}^m$, the Jacobian of $f$ evaluated at an input point $x \in \mathbb{R}^n$, denoted $\partial f(x)$, is often thought of as a matrix in $\mathbb{R}^m \times \mathbb{R}^n$: # # $\qquad \partial f(x) \in \mathbb{R}^{m \times n}$. # # But we can also think of $\partial f(x)$ as a linear map, which maps the tangent space of the domain of $f$ at the point $x$ (which is just another copy of $\mathbb{R}^n$) to the tangent space of the codomain of $f$ at the point $f(x)$ (a copy of $\mathbb{R}^m$): # # $\qquad \partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. # # This map is called the [pushforward map](https://en.wikipedia.org/wiki/Pushforward_(differential)) of $f$ at $x$. The Jacobian matrix is just the matrix for this linear map in a standard basis. # # If we don't commit to one specific input point $x$, then we can think of the function $\partial f$ as first taking an input point and returning the Jacobian linear map at that input point: # # $\qquad \partial f : \mathbb{R}^n \to \mathbb{R}^n \to \mathbb{R}^m$. # # In particular, we can uncurry things so that given input point $x \in \mathbb{R}^n$ and a tangent vector $v \in \mathbb{R}^n$, we get back an output tangent vector in $\mathbb{R}^m$. We call that mapping, from $(x, v)$ pairs to output tangent vectors, the *Jacobian-vector product*, and write it as # # $\qquad (x, v) \mapsto \partial f(x) v$ # # #### JVPs in JAX code # # Back in Python code, JAX's `jvp` function models this transformation. Given a Python function that evaluates $f$, JAX's `jvp` is a way to get a Python function for evaluating $(x, v) \mapsto (f(x), \partial f(x) v)$. 
# + colab_type="code" id="pTncYR6F6vvG" colab={} from jax import jvp # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) key, subkey = random.split(key) v = random.normal(subkey, W.shape) # Push forward the vector `v` along `f` evaluated at `W` y, u = jvp(f, (W,), (v,)) # + [markdown] colab_type="text" id="m1VJgJYQGfCK" # In terms of Haskell-like type signatures, we could write # # ```haskell # jvp :: (a -> b) -> a -> T a -> (b, T b) # ``` # # where we use `T a` to denote the type of the tangent space for `a`. In words, `jvp` takes as arguments a function of type `a -> b`, a value of type `a`, and a tangent vector value of type `T a`. It gives back a pair consisting of a value of type `b` and an output tangent vector of type `T b`. # + [markdown] colab_type="text" id="3RpbiasHGD3X" # The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a "JVP rule" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values. # # That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 3x the cost of just evaluating the function (one unit of work for evaluating the original function, for example `sin(x)`; one unit for linearizing, like `cos(x)`; and one unit for applying the linearized function to a vector, like `cos_x * v`). Put another way, for a fixed primal point $x$, we can evaluate $v \mapsto \partial f(x) \cdot v$ for about the same marginal cost as evaluating $f$. 
# # That memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning? # # To answer that, first think about how you could use a JVP to build a full Jacobian matrix. If we apply a JVP to a one-hot tangent vector, it reveals one column of the Jacobian matrix, corresponding to the nonzero entry we fed in. So we can build a full Jacobian one column at a time, and to get each column costs about the same as one function evaluation. That will be efficient for functions with "tall" Jacobians, but inefficient for "wide" Jacobians. # # If you're doing gradient-based optimization in machine learning, you probably want to minimize a loss function from parameters in $\mathbb{R}^n$ to a scalar loss value in $\mathbb{R}$. That means the Jacobian of this function is a very wide matrix: $\partial f(x) \in \mathbb{R}^{1 \times n}$, which we often identify with the Gradient vector $\nabla f(x) \in \mathbb{R}^n$. Building that matrix one column at a time, with each call taking a similar number of FLOPs to evaluating the original function, sure seems inefficient! In particular, for training neural networks, where $f$ is a training loss function and $n$ can be in the millions or billions, this approach just won't scale. # # To do better for functions like this, we just need to use reverse-mode. # + [markdown] colab_type="text" id="PhkvkZazdXu1" # ### Vector-Jacobian products (VJPs, aka reverse-mode autodiff) # # Where forward-mode gives us back a function for evaluating Jacobian-vector products, which we can then use to build Jacobian matrices one column at a time, reverse-mode is a way to get back a function for evaluating vector-Jacobian products (equivalently Jacobian-transpose-vector products), which we can use to build Jacobian matrices one row at a time. # # #### VJPs in math # # Let's again consider a function $f : \mathbb{R}^n \to \mathbb{R}^m$. 
# Starting from our notation for JVPs, the notation for VJPs is pretty simple:
#
# $\qquad (x, v) \mapsto v \partial f(x)$,
#
# where $v$ is an element of the cotangent space of $f$ at $x$ (isomorphic to another copy of $\mathbb{R}^m$). When being rigorous, we should think of $v$ as a linear map $v : \mathbb{R}^m \to \mathbb{R}$, and when we write $v \partial f(x)$ we mean function composition $v \circ \partial f(x)$, where the types work out because $\partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. But in the common case we can identify $v$ with a vector in $\mathbb{R}^m$ and use the two almost interchangeably, just like we might sometimes flip between "column vectors" and "row vectors" without much comment.
#
# With that identification, we can alternatively think of the linear part of a VJP as the transpose (or adjoint conjugate) of the linear part of a JVP:
#
# $\qquad (x, v) \mapsto \partial f(x)^\mathsf{T} v$.
#
# For a given point $x$, we can write the signature as
#
# $\qquad \partial f(x)^\mathsf{T} : \mathbb{R}^m \to \mathbb{R}^n$.
#
# The corresponding map on cotangent spaces is often called the [pullback](https://en.wikipedia.org/wiki/Pullback_(differential_geometry))
# of $f$ at $x$. The key for our purposes is that it goes from something that looks like the output of $f$ to something that looks like the input of $f$, just like we might expect from a transposed linear function.
#
# #### VJPs in JAX code
#
# Switching from math back to Python, the JAX function `vjp` can take a Python function for evaluating $f$ and give us back a Python function for evaluating the VJP $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$.
# + colab_type="code" id="1tFcRuEzkGRR" colab={} from jax import vjp # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) y, vjp_fun = vjp(f, W) key, subkey = random.split(key) u = random.normal(subkey, y.shape) # Pull back the covector `u` along `f` evaluated at `W` v = vjp_fun(u) # + [markdown] colab_type="text" id="oVOZexCEkvv3" # In terms of Haskell-like type signatures, we could write # # ```haskell # vjp :: (a -> b) -> a -> (b, CT b -> CT a) # ``` # # where we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`. # # This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$ is only about three times the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \mathbb{R}^n \to \mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters. # # There's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!). # # For more on how reverse-mode works, see [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/). 
# + [markdown] colab_type="text" id="MWHcAPqLdJFn"
# ### Hessian-vector products using both forward- and reverse-mode

# + [markdown] colab_type="text" id="YG3g5C3KdW7H"
# Earlier we built a Hessian-vector product out of reverse-mode alone
# (valid when the second derivatives are continuous):

# + colab_type="code" id="C70CA-7wdelL" colab={}
def hvp(f, x, v):
    """Reverse-over-reverse Hessian-vector product."""
    inner = lambda y: np.vdot(grad(f)(y), v)
    return grad(inner)(x)


# + [markdown] colab_type="text" id="zJlJbFKCdfd0"
# That works, but mixing forward-mode with reverse-mode is even better and
# saves memory.
#
# Mathematically, for $f : \mathbb{R}^n \to \mathbb{R}$, a linearization
# point $x \in \mathbb{R}^n$, and a vector $v \in \mathbb{R}^n$, we want
#
# $(x, v) \mapsto \partial^2 f(x) v$
#
# Let $g : \mathbb{R}^n \to \mathbb{R}^n$ be the gradient of $f$, i.e.
# $g(x) = \partial f(x)$. Its JVP is all we need, because
#
# $(x, v) \mapsto \partial g(x) v = \partial^2 f(x) v$.
#
# That statement maps straight onto code:

# + colab_type="code" id="rq3C0reVfAaI" colab={}
from jax import jvp, grad

# forward-over-reverse
def hvp(f, primals, tangents):
    """Forward-over-reverse Hessian-vector product: jvp of grad(f)."""
    _, tangent_out = jvp(grad(f), primals, tangents)
    return tangent_out


# + [markdown] colab_type="text" id="XUsye1SwfSFm"
# Better still: because nothing here calls `np.dot` directly, this `hvp`
# accepts arrays of any shape and arbitrary container types (vectors stored
# as nested lists/dicts/tuples), and has no dependence on `jax.numpy` at all.
# # Here's an example of how to use it: # + colab_type="code" id="bmpuQa5_f1Al" outputId="9928e5a1-9706-48bf-f378-e8ac7ae449bc" colab={"base_uri": "https://localhost:8080/", "height": 34} def f(X): return np.sum(np.tanh(X)**2) key, subkey1, subkey2 = random.split(key, 3) X = random.normal(subkey1, (30, 40)) V = random.normal(subkey2, (30, 40)) ans1 = hvp(f, (X,), (V,)) ans2 = np.tensordot(hessian(f)(X), V, 2) print(np.allclose(ans1, ans2, 1e-4, 1e-4)) # + [markdown] colab_type="text" id="aWTii5TyXL5C" # Another way you might consider writing this is using reverse-over-forward: # + colab_type="code" id="YxwmXZH2XQrw" colab={} # reverse-over-forward def hvp_revfwd(f, primals, tangents): g = lambda primals: jvp(f, primals, tangents)[1] return grad(g)(primals) # + [markdown] colab_type="text" id="8z-QG_xTXR4I" # That's not quite as good, though, because forward-mode has less overhead than reverse-mode, and since the outer differentiation operator here has to differentiate a larger computation than the inner one, keeping forward-mode on the outside works best: # + colab_type="code" id="lxfv25qTX5gZ" outputId="65a04b9b-1510-4123-b352-d2a63d512e5c" colab={"base_uri": "https://localhost:8080/", "height": 153} # reverse-over-reverse, only works for single arguments def hvp_revrev(f, primals, tangents): x, = primals v, = tangents return grad(lambda x: np.vdot(grad(f)(x), v))(x) print("Forward over reverse") # %timeit -n10 -r3 hvp(f, (X,), (V,)) print("Reverse over forward") # %timeit -n10 -r3 hvp_revfwd(f, (X,), (V,)) print("Reverse over reverse") # %timeit -n10 -r3 hvp_revrev(f, (X,), (V,)) print("Naive full Hessian materialization") # %timeit -n10 -r3 np.tensordot(hessian(f)(X), V, 2) # + [markdown] colab_type="text" id="xtqSUJgzwQXO" # ## Composing VJPs, JVPs, and `vmap` # + [markdown] colab_type="text" id="PSL1TciM6vvI" # ### Jacobian-Matrix and Matrix-Jacobian products # # Now that we have `jvp` and `vjp` transformations that give us functions to push-forward or 
pull-back single vectors at a time, we can use JAX's `vmap` [transformation](https://github.com/google/jax#auto-vectorization-with-vmap) to push and pull entire bases at once. In particular, we can use that to write fast matrix-Jacobian and Jacobian-matrix products. # + colab_type="code" id="asAWvxVaCmsx" outputId="aa90dc09-454b-49b1-a17a-fdb2c52460c6" colab={"base_uri": "https://localhost:8080/", "height": 102} # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) # Pull back the covectors `m_i` along `f`, evaluated at `W`, for all `i`. # First, use a list comprehension to loop over rows in the matrix M. def loop_mjp(f, x, M): y, vjp_fun = vjp(f, x) return np.vstack([vjp_fun(mi) for mi in M]) # Now, use vmap to build a computation that does a single fast matrix-matrix # multiply, rather than an outer loop over vector-matrix multiplies. def vmap_mjp(f, x, M): y, vjp_fun = vjp(f, x) outs, = vmap(vjp_fun)(M) return outs key = random.PRNGKey(0) num_covecs = 128 U = random.normal(key, (num_covecs,) + y.shape) loop_vs = loop_mjp(f, W, M=U) print('Non-vmapped Matrix-Jacobian product') # %timeit -n10 -r3 loop_mjp(f, W, M=U) print('\nVmapped Matrix-Jacobian product') vmap_vs = vmap_mjp(f, W, M=U) # %timeit -n10 -r3 vmap_mjp(f, W, M=U) assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Matrix-Jacobian Products should be identical' # + colab_type="code" id="TDaxsJrlDraK" outputId="37b33c33-df98-4545-88fa-e6a3a5cbdbe5" colab={"base_uri": "https://localhost:8080/", "height": 102} def loop_jmp(f, x, M): # jvp immediately returns the primal and tangent values as a tuple, # so we'll compute and select the tangents in a list comprehension return np.vstack([jvp(f, (W,), (mi,))[1] for mi in M]) def vmap_jmp(f, x, M): _jvp = lambda s: jvp(f, (W,), (s,))[1] return vmap(_jvp)(M) num_vecs = 128 S = random.normal(key, (num_vecs,) + W.shape) loop_vs = loop_jmp(f, W, M=S) print('Non-vmapped Jacobian-Matrix product') # %timeit -n10 -r3 
loop_jmp(f, W, M=S) vmap_vs = vmap_jmp(f, W, M=S) print('\nVmapped Jacobian-Matrix product') # %timeit -n10 -r3 vmap_jmp(f, W, M=S) assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Jacobian-Matrix products should be identical' # + [markdown] colab_type="text" id="MXFEFBDz6vvL" # ### The implementation of `jacfwd` and `jacrev` # # # + [markdown] colab_type="text" id="ZAgUb6sp8bf7" # Now that we've seen fast Jacobian-matrix and matrix-Jacobian products, it's not hard to guess how to write `jacfwd` and `jacrev`. We just use the same technique to push-forward or pull-back an entire standard basis (isomorphic to an identity matrix) at once. # + colab_type="code" id="HBEzsDH1U5_4" colab={} from jax import jacrev as builtin_jacrev def our_jacrev(f): def jacfun(x): y, vjp_fun = vjp(f, x) # Use vmap to do a matrix-Jacobian product. # Here, the matrix is the Euclidean basis, so we get all # entries in the Jacobian at once. J, = vmap(vjp_fun, in_axes=0)(np.eye(len(y))) return J return jacfun assert np.allclose(builtin_jacrev(f)(W), our_jacrev(f)(W)), 'Incorrect reverse-mode Jacobian results!' # + colab_type="code" id="Qd9gVZ5t6vvP" colab={} from jax import jacfwd as builtin_jacfwd def our_jacfwd(f): def jacfun(x): _jvp = lambda s: jvp(f, (x,), (s,))[1] Jt =vmap(_jvp, in_axes=1)(np.eye(len(x))) return np.transpose(Jt) return jacfun assert np.allclose(builtin_jacfwd(f)(W), our_jacfwd(f)(W)), 'Incorrect forward-mode Jacobian results!' # + [markdown] colab_type="text" id="7r5_m9Y68bf_" # Interestingly, [Autograd](https://github.com/hips/autograd) couldn't do this. Our [implementation](https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/differential_operators.py#L60) of reverse-mode `jacobian` in Autograd had to pull back one vector at a time with an outer-loop `map`. Pushing one vector at a time through the computation is much less efficient than batching it all together with `vmap`. 
# + [markdown] colab_type="text" id="9maev0Nd8bf_" # Another thing that Autograd couldn't do is `jit`. Interestingly, no matter how much Python dynamism you use in your function to be differentiated, we could always use `jit` on the linear part of the computation. For example: # + colab_type="code" id="_5jDflC08bgB" outputId="964c1bae-afcb-40ae-c589-ac80ff97a7d5" colab={"base_uri": "https://localhost:8080/", "height": 34} def f(x): try: if x < 3: return 2 * x ** 3 else: raise ValueError except ValueError: return np.pi * x y, f_vjp = vjp(f, 4.) print(jit(f_vjp)(1.)) # + [markdown] colab_type="text" id="3fPWLrxK8bgD" # ## Complex numbers and differentiation # + [markdown] colab_type="text" id="2pZOHvrm8bgE" # JAX is great at complex numbers and differentiation. To support both [holomorphic and non-holomorphic differentiation](https://en.wikipedia.org/wiki/Holomorphic_function), it helps to think in terms of JVPs and VJPs. # # Consider a complex-to-complex function $f: \mathbb{C} \to \mathbb{C}$ and identify it with a corresponding function $g: \mathbb{R}^2 \to \mathbb{R}^2$, # + colab_type="code" id="OaqZ2MuP8bgF" colab={} def f(z): x, y = np.real(z), np.imag(z) return u(x, y) + v(x, y) * 1j def g(x, y): return (u(x, y), v(x, y)) # + [markdown] colab_type="text" id="3XB5oGxl8bgH" # That is, we've decomposed $f(z) = u(x, y) + v(x, y) i$ where $z = x + y i$, and identified $\mathbb{C}$ with $\mathbb{R}^2$ to get $g$. # + [markdown] id="6fBBMxqpiVjF" colab_type="text" # Since $g$ only involves real inputs and outputs, we already know how to write a Jacobian-vector product for it, say given a tangent vector $(c, d) \in \mathbb{R}^2$, namely # # $\begin{bmatrix} \partial_0 u(x, y) & \partial_1 u(x, y) \\ \partial_0 v(x, y) & \partial_1 v(x, y) \end{bmatrix} # \begin{bmatrix} c \\ d \end{bmatrix}$. 
# # To get a JVP for the original function $f$ applied to a tangent vector $c + di \in \mathbb{C}$, we just use the same definition and identify the result as another complex number, # # $\partial f(x + y i)(c + d i) = # \begin{matrix} \begin{bmatrix} 1 & i \end{bmatrix} \\ ~ \end{matrix} # \begin{bmatrix} \partial_0 u(x, y) & \partial_1 u(x, y) \\ \partial_0 v(x, y) & \partial_1 v(x, y) \end{bmatrix} # \begin{bmatrix} c \\ d \end{bmatrix}$. # # That's our definition of the JVP of a $\mathbb{C} \to \mathbb{C}$ function! Notice it doesn't matter whether or not $f$ is holomorphic: the JVP is unambiguous. # + [markdown] id="6SL6dWtFpBUr" colab_type="text" # Here's a check: # + id="BGZV__zupIMS" colab_type="code" colab={} def check(seed): key = random.PRNGKey(seed) # random coeffs for u and v key, subkey = random.split(key) a, b, c, d = random.uniform(subkey, (4,)) def fun(z): x, y = np.real(z), np.imag(z) return u(x, y) + v(x, y) * 1j def u(x, y): return a * x + b * y def v(x, y): return c * x + d * y # primal point key, subkey = random.split(key) x, y = random.uniform(subkey, (2,)) z = x + y * 1j # tangent vector key, subkey = random.split(key) c, d = random.uniform(subkey, (2,)) z_dot = c + d * 1j # check jvp _, ans = jvp(fun, (z,), (z_dot,)) expected = (grad(u, 0)(x, y) * c + grad(u, 1)(x, y) * d + grad(v, 0)(x, y) * c * 1j+ grad(v, 1)(x, y) * d * 1j) print(np.allclose(ans, expected)) # + id="I2OBU3OGp-CY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="ad7c662f-be05-4472-e8fe-8c1c58ce1771" check(0) check(1) check(2) # + [markdown] id="XjWMgDOimUcU" colab_type="text" # What about VJPs? 
We do something pretty similar: for a cotangent vector $c + di \in \mathbb{C}$ we define the VJP of $f$ as # # $(c + di)^* \; \partial f(x + y i) = # \begin{matrix} \begin{bmatrix} c & -d \end{bmatrix} \\ ~ \end{matrix} # \begin{bmatrix} \partial_0 u(x, y) & \partial_1 u(x, y) \\ \partial_0 v(x, y) & \partial_1 v(x, y) \end{bmatrix} # \begin{bmatrix} 1 \\ -i \end{bmatrix}$. # # What's with the negatives? They're just to take care of complex conjugation, and the fact that we're working with covectors. # + [markdown] id="oRu2VRjmtrgB" colab_type="text" # Here's a check of the VJP rules: # + id="4J7edvIBttcU" colab_type="code" colab={} def check(seed): key = random.PRNGKey(seed) # random coeffs for u and v key, subkey = random.split(key) a, b, c, d = random.uniform(subkey, (4,)) def fun(z): x, y = np.real(z), np.imag(z) return u(x, y) + v(x, y) * 1j def u(x, y): return a * x + b * y def v(x, y): return c * x + d * y # primal point key, subkey = random.split(key) x, y = random.uniform(subkey, (2,)) z = x + y * 1j # cotangent vector key, subkey = random.split(key) c, d = random.uniform(subkey, (2,)) z_bar = np.array(c + d * 1j) # for dtype control # check vjp _, fun_vjp = vjp(fun, z) ans, = fun_vjp(z_bar) expected = (grad(u, 0)(x, y) * c + grad(v, 0)(x, y) * (-d) + grad(u, 1)(x, y) * c * (-1j) + grad(v, 1)(x, y) * (-d) * (-1j)) assert np.allclose(ans, expected, atol=1e-5, rtol=1e-5) # + id="RieNCdXgtzs7" colab_type="code" colab={} check(0) check(1) check(2) # + [markdown] id="7I6A19Myt3qN" colab_type="text" # What about convenience wrappers like `grad`, `jacfwd`, and `jacrev`? # # For $\mathbb{R} \to \mathbb{R}$ functions, recall we defined `grad(f)(x)` as being `vjp(f, x)[1](1.0)`, which works because applying a VJP to a `1.0` value reveals the gradient (i.e. Jacobian, or derivative). 
We can do the same thing for $\mathbb{C} \to \mathbb{R}$ functions: we can still use `1.0` as the cotangent vector, and we just get out a complex number result summarizing the full Jacobian: # + id="xz_9lK61wGdm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9715dfb0-f50d-425c-fd85-c9e0a3147921" def f(z): x, y = np.real(z), np.imag(z) return x**2 + y**2 z = 3. + 4j grad(f)(z) # + [markdown] id="jqCvEE8qwGw7" colab_type="text" # For geneneral $\mathbb{C} \to \mathbb{C}$ functions, the Jacobian has 4 real-valued degrees of freedom (as in the 2x2 Jacobian matrices above), so we can't hope to represent all of them with in a complex number. But we can for holomorphic functions! A holomorphic function is precisely a $\mathbb{C} \to \mathbb{C}$ function with the special property that its derivative can be represented as a single complex number. (The [Cauchy-Riemann equations](https://en.wikipedia.org/wiki/Cauchy%E2%80%93Riemann_equations) ensure that the above 2x2 Jacobians have the special form of a scale-and-rotate matrix in the complex plane, i.e. the action of a single complex number under multiplication.) And we can reveal that one complex number using a single call to `vjp` with a covector of `1.0`. # # Because this only works for holomorphic functions, to use this trick we need to promise JAX that our function is holomorphic; otherwise, JAX will raise an error when `grad` is used for a complex-output function: # + id="Y3n9hPVrwvXx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d278e7df-ccc6-4de0-806c-31dbc3d6aaaf" def f(z): return np.sin(z) z = 3. + 4j grad(f, holomorphic=True)(z) # + [markdown] id="LjIbDxX-w9Qf" colab_type="text" # All the `holomorphic=True` promise does is disable the error when the output is complex-valued. We can still write `holomorphic=True` when the function isn't holomorphic, but the answer we get out won't represent the full Jacobian. 
Instead, it'll be the Jacobian of the function where we just discard the imaginary part of the output: # + id="th9xhwp2xaeU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="748dcfbf-f00c-4a17-a550-9833a9fb089f" def f(z): return np.conjugate(z) z = 3. + 4j grad(f, holomorphic=True)(z) # f is not actually holomorphic! # + [markdown] id="R8ytpfeXyBu2" colab_type="text" # There are some useful upshots for how `grad` works here: # # 1. We can use `grad` on holomorphic $\mathbb{C} \to \mathbb{C}$ functions. # 2. We can use `grad` to optimize $f : \mathbb{C} \to \mathbb{R}$ functions, like real-valued loss functions of complex parameters `x`, by taking steps in the dierction of the conjugate of `grad(f)(x)`. # 3. If we have an $\mathbb{R} \to \mathbb{R}$ function that just happens to use some complex-valued operations internally (some of which must be non-holomorphic, e.g. FFTs used in covolutions) then `grad` still works and we get the same result that an implementation using only real values would have given. # # In any case, JVPs and VJPs are always unambiguous. And if we wanted to compute the full Jacobian matrix of a non-holomorphic $\mathbb{C} \to \mathbb{C}$ function, we can do it with JVPs or VJPs! # + [markdown] colab_type="text" id="qmXkI37T8bgL" # You should expect complex numbers to work everywhere in JAX. Here's differentiating through a Cholesky decomposition of a complex matrix: # + colab_type="code" id="WrDHHfKI8bgM" outputId="4eb13d90-cce5-42f8-e025-e1d62c380a84" colab={"base_uri": "https://localhost:8080/", "height": 119} A = np.array([[5., 2.+3j, 5j], [2.-3j, 7., 1.+7j], [-5j, 1.-7j, 12.]]) def f(X): L = np.linalg.cholesky(X) return np.sum((L - np.sin(L))**2) grad(f, holomorphic=True)(A) # + [markdown] colab_type="text" id="Pgr2A60q9gl1" # ## More advanced autodiff # # In this notebook, we worked through some easy, and then progressively more complicated, applications of automatic differentiation in JAX. 
We hope you now feel that taking derivatives in JAX is easy and powerful. # # There's a whole world of other autodiff tricks and functionality out there. Topics we didn't cover, but hope to in a "Advanced Autodiff Cookbook" include: # # - Gauss-Newton Vector Products, linearizing once # - Custom VJPs and JVPs # - Efficient derivatives at fixed-points # - Estimating the trace of a Hessian using random Hessian-vector products. # - Forward-mode autodiff using only reverse-mode autodiff. # - Taking derivatives with respect to custom data types. # - Checkpointing (binomial checkpointing for efficient reverse-mode, not model snapshotting). # - Optimizing VJPs with Jacobian pre-accumulation.
docs/notebooks/autodiff_cookbook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ds17-unit-1 # language: python # name: ds17-unit-1 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jgutierrez2219/Daily-Warm-Ups/blob/master/Copy_of_basics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="eaCRXrCTg1GI" colab_type="text" # # Basics # + [markdown] id="_E_4A16jg1GJ" colab_type="text" # ## load a csv from the following url: # # #### https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/ # # # + id="da2hZDXgg1GK" colab_type="code" colab={} # A good practice is to keep your import statements to the top of your # notebooks import pandas as pd # + id="L1p1dNNUg1GO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 207} outputId="ec8a42e0-8461-4257-aac1-efeb0d95226c" # One thing you should begin practicing early is commenting your code! # Another good practice is to use descriptive variable names auto_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data' df = pd.read_csv(auto_data_url,header = None ) print(df.shape) df.head() # + id="YtQrEpCUg1GS" colab_type="code" outputId="242bf64b-4310-4218-9682-d314394e70f6" colab={"base_uri": "https://localhost:8080/", "height": 265} # now print the first 5 rows of the data you just loaded print(df.shape) df.head(7) # + id="mEjsxF2pg1Ga" colab_type="code" colab={} # how many rows are in the dataset? 
# + id="ty_r3e9Fg1Ge" colab_type="code" outputId="ce1ed52c-e24a-4cb0-bb76-709af59fbd6f" colab={} # how many columns # + id="oycDonvQ8W4n" colab_type="code" colab={} # what is the shape of the dataset # + [markdown] id="r5MhdTKxBzHq" colab_type="text" # (4176, 9) # + [markdown] id="Ho8gLGlGg1Gl" colab_type="text" # ### From the data directory load the iris dataset # + id="fzbSHSXwg1Gm" colab_type="code" outputId="87cae6fc-d957-4e48-cfed-c99e6d32bdbc" colab={"base_uri": "https://localhost:8080/", "height": 207} iris_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data" column_headers = [ "sepal-length", "sepal-width", "petal-length", "petal_width", "class" ] df = pd.read_csv(iris_data_url,header= None,names=column_headers) print(df.shape) df.head() # + [markdown] id="mAMilAWUhPDE" colab_type="text" # ## Make a basic graph using either of the above data sources when you're finished post the graph in the ds17 channel on slack # + id="LDI-Lv4mg1Gs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 278} outputId="ef3a7e28-1205-4c7d-f72e-e09acc5df903" df.plot.scatter('sepal-length','petal_width'); # + [markdown] id="CeKc8dQyhdhf" colab_type="text" # ## Save a copy of your notebook to GitHub and submit a pull request to this repository # #
Copy_of_basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Network Diagram with Sigma.js # This example uses [sigma.js](http://sigmajs.org/) to visualize a network produced in python. # #### Notebook Config from IPython.core.display import display, HTML from string import Template import pandas as pd import json, random HTML(''' <script src="lib/sigmajs/sigma.min.js"></script> <script src="js/sigma-add-method-neighbors.js"></script> ''') # #### Network Construction # + random.seed(42) n_nodes = 40 n_edges = 200 graph_data = { 'nodes': [], 'edges': [] } for i in range(n_nodes): graph_data['nodes'].append({ "id": "n" + str(i), "label": "n" + str(i), "x": random.uniform(0,1), "y": random.uniform(0,1), "size": random.uniform(0.2,1) }) for j in range(n_edges): x_center = random.uniform(0,1) y_center = random.uniform(0,1) x_dist = random.uniform(0.1,0.5) y_dist = random.uniform(0.2,0.5) neighborhood = [] for node in graph_data['nodes']: if abs(node['x'] - x_center) < x_dist: if abs(node['y'] - y_center) < y_dist: neighborhood.append(int(node['id'].replace('n',''))) if len(neighborhood) >= 2: ends = random.sample(neighborhood,2) graph_data['edges'].append({ "id": "e" + str(j), "source": "n" + str(ends[0]), "target": "n" + str(ends[1]) }) # - pd.DataFrame(graph_data['nodes']).head() pd.DataFrame(graph_data['edges']).head() # #### Visualization # + js_text_template = Template(open('js/sigma-graph.js','r').read()) js_text = js_text_template.substitute({'graph_data': json.dumps(graph_data), 'container': 'graph-div'}) html_template = Template(''' <div id="graph-div" style="height:400px"></div> <script> $js_text </script> ''') HTML(html_template.substitute({'js_text': js_text})) # - # Note that you can zoom and pan (click and move) to navigate the graph. 
Also note that if you click on a node, it highlights it and the nodes to which it is directly connected, along with all of the edges within this neighborhood.
ex6_sigma_js_graph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Setup # # ## Load packages # + import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy import stats from tqdm import tqdm from scipy.special import binom import itertools import helpers as hp import simulations as sims import importlib importlib.reload(sims) plt.style.use('ggplot') FIG_PATH = "./../../figures/" # - # ## Load processed data data= pd.read_csv('./../../experimental_data/processed_data/single_cell_data.csv') data_avg = pd.read_csv('./../../experimental_data/processed_data/avg_data.csv') data_avg_one_cell = pd.read_csv('./../../experimental_data/processed_data/avg_data_one_cell.csv') fits = pd.read_csv('./output/fits_summary.csv') fits['p'] = fits.beta_population.values/(2*fits.alpha_population.values) experiments = data.experiment.unique() experiments def get_lags(df): df2 = df[df.initial_cells==1] df2 = df2[df2.bf>1] return np.array([np.min(df2[df2.well==well].time) for well in df2.well.unique()]) data_one_cell = data[data.initial_cells==1] fig,axs = plt.subplots(ncols = len(experiments),figsize=(10,2),sharex=True,sharey="row") k = 0 for exp in experiments: ax = axs[k] ax.set_title(data_avg[data_avg.experiment== exp].defect.values[0]\ +'\n'+data_avg[data_avg.experiment== exp].nuclease.values[0]) lags = get_lags(data[data.experiment==exp]) ax.hist(lags,10,density=True); ax.vlines([np.mean(lags)],[0],[0.005],'C1',linestyle='--') ax.grid(False) print(exp,"\t\tlag = ",np.var(lags)) k+=1
code/analysis/lag_distribution_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="IUqxF8tVSov0" # <div style="text-align: right"> # <i> # LIN 5300<br> # Spring 2022 <br> # <NAME> # </i> # </div> # # + id="zdioV4R6HrER" from google.colab import files from collections import Counter from pprint import pprint import re # + [markdown] id="0UcG19eWHMJd" # # From counts to probabilities and bigram models # # In the previous notebook we have played around with ways to obtain counts for words, and do some basic pre-processing. # But it would still be nice to get a few other metrics, such as # # 1. the frequencies of word types rather than their total counts (this makes it easier to compare different texts since 50 mentions of "buletic" in a 1000-page novel doesn't have the same weight as 50 mentions in a 1000-word essay), # 2. Frequencies of bigrams (or ngrams) # 3. Use counts information to get an MLE of conditional probabilites # # # Before we continue, though, we once again have to run all the relevant code to get counts for our texts *Hamlet* and *Princess of Mars*. 
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 39} id="o182kTVCH4Ar" outputId="5079f806-c2fd-4bc7-e471-b0259495e7b4" #Import files (this code is specific to reading files into Colab) # Be sure to upload the file matching the variable name hamlet = files.upload() hamlet_full = hamlet['hamlet_clean.txt'].decode('utf-8') mars = files.upload() mars_full = mars['mars_clean.txt'].decode('utf-8') # + id="--l3sVZxHMJf" def tokenize(the_string): """Convert string to list of words""" return re.findall(r"\w+", the_string) # define a variable for each token list hamlet = Counter(tokenize(hamlet_full)) mars = Counter(tokenize(mars_full)) # + [markdown] id="nxNI5pR7HMJf" # ## Calculating frequencies # # The frequency of a word indicates how many percent of a text are taken up by its tokens. # For example, if a word type has 6 tokens in a text of 1000 words, then its frequency is $\frac{6}{1000} = 0.006 = 0.6\%$. # So we get the frequency of a word *w* by dividing the count of *w* by the total number of tokens in the text. # # We already have many of the tools that are needed to calculate frequencies: # # 1. a counter for each tokenized text, and # 1. `for`-loops, and # 1. the `len`-function, and # 1. the `sum`-function, and # 1. the usage of keys such as `[x]` to get the value of a specific item `x` in a counter. # + [markdown] id="u0mcM-2kHMJg" # We can recombine these techniques to define a custom function for computing word frequencies. # # First, we will need to determine the total number of tokens in the text. # But we already know how to do that with `sum` and `Counter.values`. # Once we know the total, we can calculate the frequency of a word type by dividing its number of tokens by `total`. 
# Let us put the relevant code for these steps into a function that prints the frequency of every word type. # + id="yZSG1ap-HMJg" def frequencies(word_counter): """print frequency for each word type in counter""" total = sum(Counter.values(word_counter)) # calculate frequencies for all the words in the counter for current_word in word_counter: number_of_tokens = word_counter[current_word] frequency = number_of_tokens / total print(current_word, frequency) # + id="jhJHbM_ZJL2S" # We let's test it of we want # But be warned that it is going to be a long (in terms of screen space) print frequencies(hamlet) # + [markdown] id="G8Y-hsWpHMJh" # ### Adding frequencies to the counter # # The `frequencie` function is still somewhat unsatisfying in that it prints the frequency of each word type. # Printing to screen isn't very useful most of the time, in particular with tens of thousands of words. # It would be better if we could simply replace the absolute values in the counter by frequencies. # This is actually fairly easy. # The `[x]` notation is not only useful for retrieving the value of an element, it also allows us to **specify** the value of an element. # + id="sZhMOKFkHMJi" outputId="ccd26cf3-cefc-418c-bd47-8d493dc183d8" # define our test counter test = Counter(["a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "c", "c"]) print(test) # let's change the value of "a"; # here's what it is right now print("a's current value:", test["a"]) # and now we'll change it to 0.1 test["a"] = 0.1 print("a's new value:", test["a"]) # and now we add a new element "d" to the counter test["d"] = 10 print("d was added with count:", test["d"]) print("The new counter is:", test) # + [markdown] id="cjrop-jBHMJj" # Now we can finalize the `frequencies` function. # Instead of printing the frequency of `current_word`, the function should override the value of `current_word` in `word_counter` with `frequency`. # At the end, the function returns `word_counter`. 
# You can test your code in the second cell. # The results should be `0.t` for `a`, `0.3333` for `b`, and `0.1666` for `c`. # + id="ibApK6fZHMJj" # change and complete the code below def frequencies(word_counter): # add an updated docstring here total = sum(Counter.values(word_counter)) for current_word in word_counter: number_of_tokens = word_counter[current_word] frequency = number_of_tokens / total word_counter[current_word] = frequency # this part needs to change return word_counter # + colab={"base_uri": "https://localhost:8080/"} id="qV3J83FVHMJj" outputId="91ae72f3-d41a-47dd-8aeb-9fc153c8ba00" # test your code here test_counts = Counter(["a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "c", "c"]) print(test_counts) test_frequency = frequencies(test_counts) print(test_frequency) # + [markdown] id="8QDW3GDyHMJj" # ### An unintended side-effect # # Let us run the test one more time, with just a minor change in the order of the `print`-statements. # Now we first compute `test_frequency` and the print `test_counts` and `test_frequency`. # + colab={"base_uri": "https://localhost:8080/"} id="_Xnv58fBHMJk" outputId="ffcc26b9-6e5d-444e-e0c0-26c72de66ab5" # test your code here test_counts = Counter(["a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "c", "c"]) test_frequency = frequencies(test_counts) print(test_counts) print(test_frequency) # + [markdown] id="3DKmL7EQHMJk" # Uhm, what's going on here? # Why do `test_counts` and `test_frequency` look the same? # Where did the absolute word counts go? # # The problem is with how we wrote the function `frequencies`. # This is a function that takes a word counter as an argument and then **overwrites** the count of each word type with its frequency. # So if we run `frequencies` over `test_counts`, all the values of `test_counts` are replaced by frequencies. # That's not really what we want. # Instead, we want to produce a copy of `test_counts` with frequencies while keeping the original version of `test_counts` untouched. 
# We can create a copy of a counter with the function `Counter.copy`. # + id="wYKHu8xpHMJk" # test your code here test_counts = Counter(["a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "c", "c"]) test_frequency = frequencies(Counter.copy(test_counts)) print(test_counts) print(test_frequency) # + [markdown] id="XXXJ09JMHMJk" # Now we now longer run `frequencies` on `test_counts`, but a dynamically created copy of `test_counts`. # Hence the values of `test_counts` remain unaltered, and we get different outputs for `print` at the end. # + [markdown] id="U8stJWpHHMJk" # **Python Practice.** # Copy-paste your definition of the `frequencies` function into the cell below, then change it so that it always creates a copy `temp_copy` of `word_counter` at the beginning and then carries out all operations over `temp_copy` instead of `word_counter`. # Then run the code in the next cell to verify that your new definition of `frequencies` works correctly. # + id="VMo2QXrSHMJl" # copy-paste your code for frequencies here, then modify it as described # + id="1fQ6oD34HMJl" # test your code here test_counts = Counter(["a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "c", "c"]) test_frequency = frequencies(test_counts) print(test_counts) print(test_frequency) # + [markdown] id="Xij_k77rLN-m" # Ok, we have now seen how to obtain unigram frequencies out of a corpus, and compare frequencies across corpora. # # Note that under MLE appraoch, we said that counts can approximate probabilities, so that $P(A)$ can be approximated by: # # <center> $\large p(A) \approx \frac{count(A)}{\sum_{w \in V}count(w)}$ </center> # # So we now have a pythonic way of getting unigram probabilities out of a corpus. Can we extend this approach to n-grams? # # Well, first, we must obtain the n-grams! Let's work with bigrams. # Many Python libraries can extract bigrams for us, but it is also fairly easy to do it from scratch. 
# + id="10RsbI6HL11-" #this function takes a list of words #and returns a list of bigrams def bigrammer(input_list): bigram_list = [] for i in range(len(input_list)-1): bigram_list.append((input_list[i], input_list[i+1])) return bigram_list # + id="VyV0NKqUMtbZ" hamlet_bigrams = bigrammer(tokenize(hamlet_full)) mars_bigrams = bigrammer(tokenize(mars_full)) # + colab={"base_uri": "https://localhost:8080/"} id="1J-bp5rPM2tk" outputId="01614412-dc72-4bc1-fa35-7e9f115ae6be" #let's check what we got pprint(hamlet_bigrams[:10]) pprint(mars_bigrams[:10]) # + [markdown] id="ULG-ikVCOBWp" # **Question:** What do you think the reason is to want the bigrams as tuples and not a single string? # + [markdown] id="nlE4ZHFwON0q" # # Computing Conditional Probabilities # # Recall that a conditional probability $P(B | A)$ is a measure that allows us to estimate how many of our observations of $B$ occur having already seen $A$. # We can compute it using an MLE approach. # # We have already seen that $p(A)$ can be approximated by: # # <center> $\large p(A) \approx \frac{count(A)}{\sum_{w \in V}count(w)}$ </center> # # In the previous lecture, we then saw that we can also directly approximate $P(B | A)$ as: # # <center> $\large p(A) \approx \frac{count(A,B)}{\sum_{w \in V}count(A)}$ </center> # # # + id="R-wmHn5lPrPk" # Let's get bigram counts hamlet_bgcounts = Counter(hamlet_bigrams) # + id="Goz7qLAnQAoQ" def conditional(bg_counter,word_counter): "Obtain Conditional bigram probabilites according to MLE" bg_counter = Counter.copy(bg_counter) for current_bg in bg_counter: number_of_tokens = bg_counter[current_bg] cond_prob = number_of_tokens / word_counter[current_bg[0]] bg_counter[current_bg] = cond_prob return bg_counter # + [markdown] id="6l9sxZLUSmI6" # # + id="rZlWSshoRZKk" # Let's test it test_conditional = conditional(hamlet_bgcounts,hamlet) # + [markdown] id="Hygfsg6rRmWW" # **Python practice.** Now test the fucntion by printing the conditional probabilities for the first 15 
bigrams in hamlet. # # + id="a8izmrFfSUFz"
02_Ngrams/01_frequencies.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Exploratory Data Analysis on FHIR # + # #!pip install pandas # #!pip install requests # #!pip install altair # - # NOTE: must be anonymized # ### Install Anaconda environment # ### Install Python SMART on FHIR client # See docs: http://docs.smarthealthit.org/client-py/ # ### Generate test data # Download FHIR STU3 sample data from Synthea # https://synthetichealth.github.io/synthea/ # + import os DATA_PATH = os.path.join(".", "data") FHIR_1K_PATIENTS_PATH = os.path.join(DATA_PATH, "1k-patients") # !mkdir {DATA_PATH} # !ls # !mkdir {FHIR_1K_PATIENTS_PATH} # !ls {FHIR_1K_PATIENTS_PATH} # - # + import requests SAMPLE_DATA_URL="https://syntheticmass.mitre.org/downloads/2017_11_06/synthea_sample_data_fhir_stu3_nov2017.zip" response = requests.get(SAMPLE_DATA_URL) DATA_PATH = os.path.join(".", "data") FHIR_1K_PATIENTS_PATH = os.path.join(DATA_PATH, "1k-patients") FHIR_1K_PATIENTS_ZIP_PATH = os.path.join(FHIR_1K_PATIENTS_PATH, "1k-patients.zip") file = open(FHIR_1K_PATIENTS_ZIP_PATH, 'wb') file.write(response.content) file.close() # - # We got 20MB zip file # !ls -la {FHIR_1K_PATIENTS_PATH} import zipfile zip = zipfile.ZipFile(FHIR_1K_PATIENTS_ZIP_PATH) zip.extractall(FHIR_1K_PATIENTS_PATH) zip.close() # !ls -ls {FHIR_1K_PATIENTS_PATH} # !ls -lS {FHIR_1K_PATIENTS_PATH}/fhir | head -10 # Ok, so there is `all_prevalences.csv` file and `fhir` directory which contains the JSONs. The largest JSON files are ~1MB. And in addition to patient related JSON files, there is also other type of information, e.g. hospital information. # ### What's in `all_prevalences.csv`? 
import pandas as pd df = pd.read_csv(os.path.join(FHIR_1K_PATIENTS_PATH, "all_prevalences.csv")) df.head() df.info() df.describe() df["POPULATION TYPE"].value_counts() df["POPULATION COUNT"].value_counts() # Let's just remove population type and count columns as they don't provide any information. # axis=1 to drop columns instead of rows # inplace=True to modify the data frame inplace, returns None instead of the data frame df.drop(["POPULATION TYPE", "POPULATION COUNT"], axis=1, inplace=True) df.head() # Occurrences, prevalence rate and prevalence percentage all provide the same information. Let's keep just one of them and drop others. df.drop(["PREVALENCE RATE", "PREVALENCE PERCENTAGE"], axis=1, inplace=True) df.head() # Items with highest and lowest prevalence rates: df.nlargest(10, "OCCURRENCES") df.nsmallest(10, "OCCURRENCES") # Items are disorders, situations, medication... df.to_csv(os.path.join(FHIR_1K_PATIENTS_PATH, "stripped_prevalences.csv")) # !ls {FHIR_1K_PATIENTS_PATH} # ### Exploring JSONs import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import altair as alt # Questions to ask? # Examples: # # What types of cars are most likely to be pulled over for speeding? # What times of day are police most active? # How common are "speed traps"? Or are tickets spread pretty evenly in terms of geography? # What are the most common things people are pulled over for? # Let's see what kind of structure a FHIR JSON has. Let's first find the smallest JSON. JSONS_PATH = os.path.join(FHIR_1K_PATIENTS_PATH, "fhir") # !ls -lSr {JSONS_PATH} | head -4 JSON_PATH = os.path.join(JSONS_PATH, "Pfannerstill249_Richelle643_0.json") # !head {JSON_PATH} # https://www.dataquest.io/blog/python-json-tutorial/ # "From this, we can tell that the JSON data is a dictionary, and is well formatted. rypw is a top level key, and is indented two spaces." 
# Load the smallest FHIR bundle JSON and flatten its 'entry' list into a DataFrame.
import json

import pandas as pd

with open(JSON_PATH) as json_data:
    d = json.load(json_data)

type(d)

d.keys()

# 'collection': the bundle is a set of resources collected into a single
# package for ease of distribution.
# https://www.hl7.org/fhir/valueset-bundle-type.html
d['type']

d['resourceType']

# +
#d['entry']
# -

# FIX: `pandas.io.json.json_normalize` is deprecated (and removed in modern
# pandas); the supported spelling is the top-level `pandas.json_normalize`.
bundle_df = pd.json_normalize(d['entry'])
bundle_df.head()

# https://www.kaggle.com/jboysen/quick-tutorial-flatten-nested-json-in-pandas
# "We see more nested columns: resource.category etc.. json_normalize docs give
# us some hints how to flatten semi-structured data further. Let's unpack the
# works column into a standalone dataframe. We'll also grab the flat columns so
# we can do analysis."
FHIR-bundle-JSON.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="FgtBtiBmV1fD" colab_type="text" # # Grammatical Chunk Matching with NLU # With the chunker you can filter a data set based on Part of Speech Tags with Regex patterns. # # I.e. You could get all nouns or adjectives in your datset with the following parameterization. # ``` # pipe['default_chunker'].setRegexParsers(['<NN>+', '<JJ>+']) # ``` # # See [here](https://www.rexegg.com/regex-quickstart.html) for a great reference of Regex operators # # ## Overview of all Part of Speech Tags : # # # |Tag |Description | Example| # |------|------------|------| # |CC| Coordinating conjunction | This batch of mushroom stew is savory **and** delicious | # |CD| Cardinal number | Here are **five** coins | # |DT| Determiner | **The** bunny went home | # |EX| Existential there | **There** is a storm coming | # |FW| Foreign word | I'm having a **déjà vu** | # |IN| Preposition or subordinating conjunction | He is cleverer **than** I am | # |JJ| Adjective | She wore a **beautiful** dress | # |JJR| Adjective, comparative | My house is **bigger** than yours | # |JJS| Adjective, superlative | I am the **shortest** person in my family | # |LS| List item marker | A number of things need to be considered before starting a business **,** such as premises **,** finance **,** product demand **,** staffing and access to customers | # |MD| Modal | You **must** stop when the traffic lights turn red | # |NN| Noun, singular or mass | The **dog** likes to run | # |NNS| Noun, plural | The **cars** are fast | # |NNP| Proper noun, singular | I ordered the chair from **Amazon** | # |NNPS| Proper noun, plural | We visted the **Kennedys** | # |PDT| Predeterminer | **Both** the children had a toy | # |POS| Possessive ending | I built the dog'**s** house | # |PRP| Personal pronoun | **You** 
need to stop | # |PRP$| Possessive pronoun | Remember not to judge a book by **its** cover | # |RB| Adverb | The dog barks **loudly** | # |RBR| Adverb, comparative | Could you sing more **quietly** please? | # |RBS| Adverb, superlative | Everyone in the race ran fast, but John ran **the fastest** of all | # |RP| Particle | He ate **up** all his dinner | # |SYM| Symbol | What are you doing **?** | # |TO| to | Please send it back **to** me | # |UH| Interjection | **Wow!** You look gorgeous | # |VB| Verb, base form | We **play** soccer | # |VBD| Verb, past tense | I **worked** at a restaurant | # |VBG| Verb, gerund or present participle | **Smoking** kills people | # |VBN| Verb, past participle | She has **done** her homework | # |VBP| Verb, non-3rd person singular present | You **flit** from place to place | # |VBZ| Verb, 3rd person singular present | He never **calls** me | # |WDT| Wh-determiner | The store honored the complaints, **which** were less than 25 days old | # |WP| Wh-pronoun | **Who** can help me? | # |WP\$| Possessive wh-pronoun | **Whose** fault is it? | # |WRB| Wh-adverb | **Where** are you going? | # # # # # # # # # Chunks are Named # # + id="M2-GiYL6xurJ" colab_type="code" colab={} import os # ! apt-get update -qq > /dev/null # Install java # ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] # ! pip install nlu > /dev/null # + [markdown] id="NYQRU3pRO146" colab_type="text" # # 2. 
Load the Chunker and print parameters # + id="pmpZSNvGlyZQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 411} executionInfo={"status": "ok", "timestamp": 1600190082391, "user_tz": -120, "elapsed": 97438, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="ee8861f0-8ea5-406d-d438-829a30cbdc8f" import nlu pipe = nlu.load('match.chunks') # Now we print the info to see at which index which com,ponent is and what parameters we can configure on them pipe.print_info() # + [markdown] id="9RRmIv9ZbaX3" colab_type="text" # # 3. Configure pipe to only match nounds and adjvectives and predict on data # + id="j2ZZZvr1uGpx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1600190089720, "user_tz": -120, "elapsed": 104728, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="4928cd89-137e-43ed-eafb-bddcd14e22cb" # Lets set our Chunker to only match NN pipe['default_chunker'].setRegexParsers(['<NN>+', '<JJ>+']) # Now we can predict with the configured pipeline pipe.predict("Jim and Joe went to the big blue market next to the town hall")
examples/collab/Chunkers/NLU_Chunking_Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Django Shell-Plus # language: python # name: django_extensions # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Name" data-toc-modified-id="Name-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Name</a></span></li><li><span><a href="#Search" data-toc-modified-id="Search-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Search</a></span><ul class="toc-item"><li><span><a href="#Load-Cached-Results" data-toc-modified-id="Load-Cached-Results-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Load Cached Results</a></span></li><li><span><a href="#Run-From-Scratch" data-toc-modified-id="Run-From-Scratch-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Run From Scratch</a></span></li></ul></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Gender-Breakdown" data-toc-modified-id="Gender-Breakdown-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Gender Breakdown</a></span></li><li><span><a href="#Face-Sizes" data-toc-modified-id="Face-Sizes-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Face Sizes</a></span></li><li><span><a href="#Appearances-on-a-Single-Show" data-toc-modified-id="Appearances-on-a-Single-Show-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Appearances on a Single Show</a></span></li><li><span><a href="#Screen-Time-Across-All-Shows" data-toc-modified-id="Screen-Time-Across-All-Shows-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Screen Time Across All Shows</a></span></li></ul></li><li><span><a href="#Persist-to-Cloud" data-toc-modified-id="Persist-to-Cloud-4"><span 
class="toc-item-num">4&nbsp;&nbsp;</span>Persist to Cloud</a></span><ul class="toc-item"><li><span><a href="#Save-Model-to-GCS" data-toc-modified-id="Save-Model-to-GCS-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Save Model to GCS</a></span><ul class="toc-item"><li><span><a href="#Make-sure-the-GCS-file-is-valid" data-toc-modified-id="Make-sure-the-GCS-file-is-valid-4.1.1"><span class="toc-item-num">4.1.1&nbsp;&nbsp;</span>Make sure the GCS file is valid</a></span></li></ul></li><li><span><a href="#Save-Labels-to-DB" data-toc-modified-id="Save-Labels-to-DB-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Save Labels to DB</a></span><ul class="toc-item"><li><span><a href="#Commit-the-person-and-labeler" data-toc-modified-id="Commit-the-person-and-labeler-4.2.1"><span class="toc-item-num">4.2.1&nbsp;&nbsp;</span>Commit the person and labeler</a></span></li><li><span><a href="#Commit-the-FaceIdentity-labels" data-toc-modified-id="Commit-the-FaceIdentity-labels-4.2.2"><span class="toc-item-num">4.2.2&nbsp;&nbsp;</span>Commit the FaceIdentity labels</a></span></li></ul></li></ul></li></ul></div> # - from esper.prelude import * from esper.identity import * from esper import embed_google_images # # Name name = '<NAME>' # # Search # ## Load Cached Results assert name != '' results = FaceIdentityModel.load(name=name) imshow(np.hstack([cv2.resize(x[1][0], (200, 200)) for x in results.model_params['images']])) plt.show() plot_precision_and_cdf(results) # ## Run From Scratch # Run this section if you do not have a cached model and precision curve estimates. 
assert name != '' img_dir = embed_google_images.fetch_images(name) face_imgs = load_and_select_faces_from_images(img_dir) face_embs = embed_google_images.embed_images(face_imgs) assert(len(face_embs) == len(face_imgs)) imshow(np.hstack([cv2.resize(x[0], (200, 200)) for x in face_imgs if x])) plt.show() face_ids_by_bucket, face_ids_to_score = face_search_by_embeddings(face_embs) precision_model = PrecisionModel(face_ids_by_bucket) print('Select all MISTAKES. Ordered by DESCENDING score. Expecting {} frames'.format(precision_model.get_lower_count())) lower_widget = precision_model.get_lower_widget() lower_widget print('Select all NON-MISTAKES. Ordered by ASCENDING distance. Expecting {} frames'.format(precision_model.get_upper_count())) upper_widget = precision_model.get_upper_widget() upper_widget # Run the following cell after labelling. lower_precision = precision_model.compute_precision_for_lower_buckets(lower_widget.selected) upper_precision = precision_model.compute_precision_for_upper_buckets(upper_widget.selected) # + precision_by_bucket = {**lower_precision, **upper_precision} results = FaceIdentityModel( name=name, face_ids_by_bucket=face_ids_by_bucket, face_ids_to_score=face_ids_to_score, precision_by_bucket=precision_by_bucket, model_params={ 'images': list(zip(face_embs, face_imgs)) } ) plot_precision_and_cdf(results) # - # Save the model results.save() # # Analysis # ## Gender Breakdown # + gender_breakdown = compute_gender_breakdown(results) print('Raw counts:') for k, v in gender_breakdown.items(): print(' ', k, ':', v) print() print('Proportions:') denominator = sum(v for v in gender_breakdown.values()) for k, v in gender_breakdown.items(): print(' ', k, ':', v / denominator) print() # - print('Showing examples:') show_gender_examples(results) # ## Face Sizes plot_histogram_of_face_sizes(results) # ## Appearances on a Single Show show_name = 'CNN Newsroom With <NAME>' screen_time_by_video_id = compute_screen_time_by_video(results, show_name) 
plot_histogram_of_screen_times_by_video(name, show_name, screen_time_by_video_id) plot_screentime_over_time(name, show_name, screen_time_by_video_id) plot_distribution_of_appearance_times_by_video(results, show_name) # ## Screen Time Across All Shows screen_time_by_show = get_screen_time_by_show(results) plot_screen_time_by_show(name, screen_time_by_show) # # Persist to Cloud # ## Save Model to GCS gcs_model_path = results.save_to_gcs() # ### Make sure the GCS file is valid gcs_results = FaceIdentityModel.load_from_gcs(name=name) plot_precision_and_cdf(gcs_results) # ## Save Labels to DB # + from django.core.exceptions import ObjectDoesNotExist def standardize_name(name): return name.lower() person_type = ThingType.objects.get(name='person') try: person = Thing.objects.get(name=standardize_name(name), type=person_type) print('Found person:', person.name) except ObjectDoesNotExist: person = Thing(name=standardize_name(name), type=person_type) print('Creating person:', person.name) labeler = Labeler(name='face-identity-{}'.format(person.name), data_path=gcs_model_path) # - # ### Commit the person and labeler person.save() labeler.save() # ### Commit the FaceIdentity labels commit_face_identities_to_db(results, person, labeler) print('Committed {} labels to the db'.format(FaceIdentity.objects.filter(labeler=labeler).count()))
app/notebooks/labeled_identities/hosts/carol_costello.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neural networks with PyTorch # # Next I'll show you how to build a neural network with PyTorch. # + # Import things like usual # %matplotlib inline # %config InlineBackend.figure_format = 'retina' import numpy as np import torch import helper import matplotlib.pyplot as plt # Standard datasets we can download # We're going to download MNIST: 0-9 digits in 28x28x1 images from torchvision import datasets, transforms # - # First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later. # + # Define a transform to normalize the data # First: convert images to tensors # Second: normalize them to contain vaues [-1,1]; original data contains [0,1] # Note: I had to change .Normalize because original images have 1 channels instead of 3 # First value is substracted, second divided # ([0,1]-0.5)/0.5 = [-1,1] transform = transforms.Compose([transforms.ToTensor(), #transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the TRAINING data # batch_size=64 -> trainloader will give us 64 images at a time! # train=True -> it's going to be used for training! 
trainset = datasets.MNIST('MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

# Download and load the TEST data
testset = datasets.MNIST('MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
# -

# We create an iterator that provides us with the next batch of images+labels
dataiter = iter(trainloader)
# FIX: the DataLoader iterator no longer exposes a `.next()` method (it was
# removed in PyTorch 1.13); the builtin `next()` works on every Python iterator.
images, labels = next(dataiter)

# We have the training data loaded into `trainloader` and we make that an
# iterator with `iter(trainloader)`. We'd use this to loop through the dataset
# for training, but here I'm just grabbing the first batch so we can check out
# the data. We can see below that `images` is just a tensor with size
# (64, 1, 28, 28). So, 64 images per batch, 1 color channel, and 28x28 images.

# Display image with index 1 from batch
# For that, transform it to Numpy and squeeze it (remove single-dimensional entries)
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');

# We can check size: batch_size, channels, rows, columns
images.size()

# ## Building networks with PyTorch
#
# Here I'll use PyTorch to build a simple feedforward network to classify the
# MNIST images. That is, the network will receive a digit image as input and
# predict the digit in the image.
#
# <img src="assets/mlp_mnist.png" width=600px>
#
# To build a neural network with PyTorch, you use the `torch.nn` module. The
# network itself is a class inheriting from `torch.nn.Module`. You define each
# of the operations separately, like `nn.Linear(784, 128)` for a fully
# connected linear layer with 784 inputs and 128 units.
#
# The class needs to include a `forward` method that implements the forward
# pass through the network. In this method, you pass some input tensor `x`
# through each of the operations you defined earlier. The `torch.nn` module
# also has functional equivalents for things like ReLUs in `torch.nn.functional`.
This module is usually imported as `F`. Then to use a ReLU activation on some layer (which is just a tensor), you'd do `F.relu(x)`. Below are a few different commonly used activation functions. # # <img src="assets/activation.png" width=700px> # # So, for this network, I'll build it with three fully connected layers, then a softmax output for predicting classes. The softmax function is similar to the sigmoid in that it squashes inputs between 0 and 1, but it's also normalized so that all the values sum to one like a proper probability distribution. # + # First layer has as inputs rows x columns of input images: 28x28 = 784 # Last layer must have as outputs number of predicted classes: 10 (0-9) # Hidden layers defined arbitrarily, but they must match first and last # Usually, the higher the number of layers and nodes in them, the better # BUT: most of the times, DL consists in finding the best number of layers, nodes, etc. # Note: layers have in_features and out_features; instead of unit/neuron layers, # they are the representation of the weight matrix that connects two layers; # as such the units are represented by the outputs. 
# Activation functions: use ReLU except in output: softmax (because we want probability of classes)
# Loss function: cross-entropy
# -

from torch import nn       # Network class
from torch import optim
import torch.nn.functional as F  # Activation functions

# +
# Network class is inherited from nn.Module
class Network(nn.Module):
    """Fully connected MNIST classifier: 784 -> 128 -> 64 -> 10, softmax output."""

    def __init__(self):
        # Run the nn.Module initialization machinery first
        super().__init__()
        # Linear (fully connected) layers; each one is the weight matrix
        # connecting two consecutive layers, so units are given by out_features.
        # Inputs: 28x28 = 784 pixels
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 64)
        # Output layer, 10 outputs - one for each digit
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        ''' Forward pass through the network, returns the output logits '''
        # Each hidden layer is followed by a ReLU activation
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # Final tensor has size batch_size x 10; softmax is applied across
        # dim=1, the dimension holding the class values, to get probabilities.
        return F.softmax(self.fc3(hidden), dim=1)

model = Network()
model
# -

# ### Initializing weights and biases
#
# The weights and such are automatically initialized for you, but it's possible
# to customize how they are initialized. The weights and biases are tensors
# attached to the layer you defined, you can get them with `model.fc1.weight`
# for instance.

# Weights and biases are automatically created!
# So we don't need to do anything more here
print(model.fc1.weight)
print(model.fc1.bias)

# For custom initialization, we want to modify these tensors in place. These are
# actually autograd *Variables*, so we need to get back the actual tensors with
# `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros
# (for biases) or random normal values.
# We could set biases to all zeros
model.fc1.bias.data.fill_(0)

# We could sample from random normal (mean = 0) with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)

# ### Forward pass
#
# Now that we have a network, let's see what happens when we pass in an image.
# This is called the forward pass. We're going to convert the image data into a
# tensor, then pass it through the operations defined by the network architecture.

# +
# Grab some data
dataiter = iter(trainloader)
# FIX: DataLoader iterators dropped the `.next()` method (removed in
# PyTorch 1.13); the builtin `next()` is the portable spelling.
images, labels = next(dataiter)

# Dataset images has a size of (batch_size, channels, rows, cols)
# Resize images into a 1D vector
# New shape is (batch size, color channels, image pixels)
batch_size = images.shape[0]
channels = images.shape[1]
pixels = images.shape[2]*images.shape[3]
images.resize_(batch_size, channels, pixels)

# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])

img = images[img_idx]
# img.view is basically the same as resize: it returns a new resized tensor
helper.view_classify(img.view(1, 28, 28), ps)
# -

# As you can see above, our network has basically no idea what this digit is.
# It's because we haven't trained it yet, all the weights are random!
#
# PyTorch provides a convenient way to build networks like this where a tensor
# is passed sequentially through operations, `nn.Sequential`
# ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)).
Using this to build the equivalent network: # + # Another option is to define the network with nn.Sequential() # This way, we don't need to manually create a class # Hyperparameters for our network input_size = 784 hidden_sizes = [128, 64] output_size = 10 # Build a feed-forward network model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]), nn.ReLU(), nn.Linear(hidden_sizes[0], hidden_sizes[1]), nn.ReLU(), nn.Linear(hidden_sizes[1], output_size), nn.Softmax(dim=1)) print(model) # Forward pass through the network and display output images, labels = next(iter(trainloader)) images.resize_(images.shape[0], 1, 784) ps = model.forward(images[0,:]) helper.view_classify(images[0].view(1, 28, 28), ps) # - # You can also pass in an `OrderedDict` to name the individual layers and operations. Note that a dictionary keys must be unique, so _each operation must have a different name_. # It is also possible to pass an OrderedDict to nn.Sequential() from collections import OrderedDict model = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_size, hidden_sizes[0])), ('relu1', nn.ReLU()), ('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])), ('relu2', nn.ReLU()), ('output', nn.Linear(hidden_sizes[1], output_size)), ('softmax', nn.Softmax(dim=1))])) model # Now it's your turn to build a simple network, use any method I've covered so far. In the next notebook, you'll learn how to train a network so it can make good predictions. # # >**Exercise:** Build a network to classify the MNIST images with _three_ hidden layers. Use 400 units in the first hidden layer, 200 units in the second layer, and 100 units in the third layer. Each hidden layer should have a ReLU activation function, and use softmax on the output layer. 
## TODO: Your network here
class MyNetwork(nn.Module):
    """MNIST classifier with three hidden layers: 784 -> 400 -> 200 -> 100 -> 10."""

    def __init__(self):
        super().__init__()
        # Layer sizes: 784, 400, 200, 100, 10
        self.fc1 = nn.Linear(784, 400)
        self.fc2 = nn.Linear(400, 200)
        self.fc3 = nn.Linear(200, 100)
        self.fc4 = nn.Linear(100, 10)

    def forward(self, x):
        # ReLU after each hidden layer, softmax over the class dimension at the end
        for hidden_layer in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden_layer(x))
        return F.softmax(self.fc4(x), dim=1)

model = MyNetwork()
model

## Run this cell with your model to make sure it works ##
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
Part 2 - Neural Networks in PyTorch.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.1 # language: julia # name: julia-1.4 # --- # # Newton's Method and Fisher Scoring (KL Chapter 14) # # Consider maximizing log-likelihood function $L(\mathbf{\theta})$, $\theta \in \Theta \subset \mathbb{R}^p$. # # ## Notations # # * **Gradient** (or **score**) of $L$: # $$ # \nabla L(\theta) = \begin{pmatrix} # \frac{\partial L(\theta)}{\partial \theta_1} \\ # \vdots \\ # \frac{\partial L(\theta)}{\partial \theta_p} # \end{pmatrix} # $$ # # * **Hessian** of $L$: # $$ # \nabla^2 L(\theta) = \begin{pmatrix} # \frac{\partial^2 L(\theta)}{\partial \theta_1 \partial \theta_1} & \dots & \frac{\partial^2 L(\theta)}{\partial \theta_1 \partial \theta_p} \\ # \vdots & \ddots & \vdots \\ # \frac{\partial^2 L(\theta)}{\partial \theta_p \partial \theta_1} & \dots & \frac{\partial^2 L(\theta)}{\partial \theta_p \partial \theta_p} # \end{pmatrix} # $$ # # * **Observed information matrix** (negative Hessian): # # $$ # - \nabla^2 L(\theta) # $$ # # * **Expected (Fisher) information matrix**: # $$ # \mathbf{E}[- \nabla^2 L(\theta)]. # $$ # ## Newton's method # # * Newton's method was originally developed for finding roots of nonlinear equations # $f(\mathbf{x}) = \mathbf{0}$ (KL 5.4). # # * Newton's method, aka **Newton-Raphson method**, is considered the gold standard for its fast (quadratic) convergence # $$ # \frac{\|\mathbf{\theta}^{(t+1)} - \mathbf{\theta}^*\|}{\|\mathbf{\theta}^{(t)} - \mathbf{\theta}^*\|^2} \to \text{constant}. # $$ # # * Idea: iterative quadratic approximation. 
# # * Taylor expansion around the current iterate $\mathbf{\theta}^{(t)}$ # $$ # L(\mathbf{\theta}) \approx L(\mathbf{\theta}^{(t)}) + \nabla L(\mathbf{\theta}^{(t)})^T (\mathbf{\theta} - \mathbf{\theta}^{(t)}) + \frac 12 (\mathbf{\theta} - \mathbf{\theta}^{(t)})^T [\nabla^2L(\mathbf{\theta}^{(t)})] (\mathbf{\theta} - \mathbf{\theta}^{(t)}) # $$ # and then maximize the quadratic approximation. # # * To maximize the quadratic function, we equate its gradient to zero # $$ # \nabla L(\theta^{(t)}) + [\nabla^2L(\theta^{(t)})] (\theta - \theta^{(t)}) = \mathbf{0}_p, # $$ # which suggests the next iterate # $$ # \begin{eqnarray*} # \theta^{(t+1)} &=& \theta^{(t)} - [\nabla^2L(\theta^{(t)})]^{-1} \nabla L(\theta^{(t)}) \\ # &=& \theta^{(t)} + [-\nabla^2L(\theta^{(t)})]^{-1} \nabla L(\theta^{(t)}). # \end{eqnarray*} # $$ # We call this **naive Newton's method**. # # * **Stability issue**: naive Newton's iterate is **not** guaranteed to be an ascent algorithm. It's equally happy to head uphill or downhill. Following example shows that the Newton iterate converges to a local maximum, converges to a local minimum, or diverges depending on starting points. # + using Plots; gr() using LaTeXStrings, ForwardDiff f(x) = sin(x) df = x -> ForwardDiff.derivative(f, x) # gradient d2f = x -> ForwardDiff.derivative(df, x) # hessian x = 2.0 # start point: 2.0 (local maximum), 2.75 (diverge), 4.0 (local minimum) titletext = "Starting point: $x" anim = @animate for iter in 0:10 iter > 0 && (global x = x - d2f(x) \ df(x)) p = Plots.plot(f, 0, 2π, xlim=(0, 2π), ylim=(-1.1, 1.1), legend=nothing, title=titletext) Plots.plot!(p, [x], [f(x)], shape=:circle) Plots.annotate!(p, x, f(x), text(latexstring("x^{($iter)}"), :right)) end gif(anim, "./tmp.gif", fps = 1); # - # ![](./newton_demo_1.gif) # ![](./newton_demo_2.gif) # ![](./newton_demo_3.gif) # * Remedies for the instability issue: # 1. approximate $-\nabla^2L(\theta^{(t)})$ by a positive definite $\mathbf{A}$ (if it's not), **and** # 2. 
line search (backtracking). # # * Why insist on a _positive definite_ approximation of Hessian? By first-order Taylor expansion, # $$ # \begin{eqnarray*} # & & L(\theta^{(t)} + s \Delta \theta^{(t)}) - L(\theta^{(t)}) \\ # &=& \nabla L(\theta^{(t)})^T s \Delta \theta^{(t)} + o(s) \\ # &=& s \cdot \nabla L(\theta^{(t)})^T [\mathbf{A}^{(t)}]^{-1} \nabla L(\theta^{(t)}) + o(s). # \end{eqnarray*} # $$ # For $s$ sufficiently small, right hand side is strictly positive when $\mathbf{A}^{(t)}$ is positive definite. The quantity $\{\nabla L(\theta^{(t)})^T [\mathbf{A}^{(t)}]^{-1} \nabla L(\theta^{(t)})\}^{1/2}$ is termed the **Newton decrement**. # # * In summary, a **practical Newton-type algorithm** iterates according to # $$ # \boxed{ \theta^{(t+1)} = \theta^{(t)} + s [\mathbf{A}^{(t)}]^{-1} \nabla L(\theta^{(t)}) # = \theta^{(t)} + s \Delta \theta^{(t)} } # $$ # where $\mathbf{A}^{(t)}$ is a pd approximation of $-\nabla^2L(\theta^{(t)})$ and $s$ is a step length. # # * For strictly concave $L$, $-\nabla^2L(\theta^{(t)})$ is always positive definite. Line search is still needed to guarantee convergence. # # * Line search strategy: step-halving ($s=1,1/2,\ldots$), golden section search, cubic interpolation, Amijo rule, ... Note the **Newton direction** # $$ # \Delta \theta^{(t)} = [\mathbf{A}^{(t)}]^{-1} \nabla L(\theta^{(t)}) # $$ # only needs to be calculated once. Cost of line search mainly lies in objective function evaluation. # # * How to approximate $-\nabla^2L(\theta)$? More of an art than science. Often requires problem specific analysis. # # * Taking $\mathbf{A} = \mathbf{I}$ leads to the method of **steepest ascent**, aka **gradient ascent**. 
# # <img src="http://trond.hjorteland.com/thesis/img208.gif" width="400" align="center"/> # ## Fisher's scoring method # # * **Fisher's scoring method**: replace $- \nabla^2L(\theta)$ by the expected Fisher information matrix # $$ # \mathbf{FIM}(\theta) = \mathbf{E}[-\nabla^2L(\theta)] = \mathbf{E}[\nabla L(\theta) \nabla L(\theta)^T] \succeq \mathbf{0}_{p \times p}, # $$ # which is psd under exchangeability of expectation and differentiation. # # Therefore the Fisher's scoring algorithm iterates according to # $$ # \boxed{ \theta^{(t+1)} = \theta^{(t)} + s [\mathbf{FIM}(\theta^{(t)})]^{-1} \nabla L(\theta^{(t)})}. # $$ # ## Generalized linear model (GLM) (KL 14.7) # ### Logistic regression # # Let's consider a concrete example: logistic regression. # # * The goal is to predict whether a credit card transaction is fraud ($y_i=1$) or not ($y_i=0$). Predictors ($\mathbf{x}_i$) include: time of transaction, last location, merchant, ... # # * $y_i \in \{0,1\}$, $\mathbf{x}_i \in \mathbb{R}^{p}$. Model $y_i \sim $Bernoulli$(p_i)$. # # * Logistic regression. Density # $$ # \begin{eqnarray*} # f(y_i|p_i) &=& p_i^{y_i} (1-p_i)^{1-y_i} \\ # &=& e^{y_i \ln p_i + (1-y_i) \ln (1-p_i)} \\ # &=& e^{y_i \ln \frac{p_i}{1-p_i} + \ln (1-p_i)}, # \end{eqnarray*} # $$ # where # $$ # \begin{eqnarray*} # \mathbf{E} (y_i) = p_i &=& \frac{e^{\eta_i}}{1+ e^{\eta_i}} \quad \text{(mean function, inverse link function)} \\ # \eta_i = \mathbf{x}_i^T \beta &=& \ln \left( \frac{p_i}{1-p_i} \right) \quad \text{(logit link function)}. 
# \end{eqnarray*} # $$ # # * Given data $(y_i,\mathbf{x}_i)$, $i=1,\ldots,n$, # # $$ # \begin{eqnarray*} # L_n(\beta) &=& \sum_{i=1}^n \left[ y_i \ln p_i + (1-y_i) \ln (1-p_i) \right] \\ # &=& \sum_{i=1}^n \left[ y_i \mathbf{x}_i^T \beta - \ln (1 + e^{\mathbf{x}_i^T \beta}) \right] \\ # \nabla L_n(\beta) &=& \sum_{i=1}^n \left( y_i \mathbf{x}_i - \frac{e^{\mathbf{x}_i^T \beta}}{1+e^{\mathbf{x}_i^T \beta}} \mathbf{x}_i \right) \\ # &=& \sum_{i=1}^n (y_i - p_i) \mathbf{x}_i = \mathbf{X}^T (\mathbf{y} - \mathbf{p}) \\ # - \nabla^2L_n(\beta) &=& \sum_{i=1}^n p_i(1-p_i) \mathbf{x}_i \mathbf{x}_i^T = \mathbf{X}^T \mathbf{W} \mathbf{X}, \quad # \text{where } \mathbf{W} &=& \text{diag}(w_1,\ldots,w_n), w_i = p_i (1-p_i) \\ # \mathbf{FIM}_n(\beta) &=& \mathbf{E} [- \nabla^2L_n(\beta)] = - \nabla^2L_n(\beta). # \end{eqnarray*} # $$ # # * Newton's method == Fisher's scoring iteration: # $$ # \begin{eqnarray*} # \beta^{(t+1)} &=& \beta^{(t)} + s[-\nabla^2 L(\beta^{(t)})]^{-1} \nabla L(\beta^{(t)}) \\ # &=& \beta^{(t)} + s(\mathbf{X}^T \mathbf{W}^{(t)} \mathbf{X})^{-1} \mathbf{X}^T (\mathbf{y} - \mathbf{p}^{(t)}) \\ # &=& (\mathbf{X}^T \mathbf{W}^{(t)} \mathbf{X})^{-1} \mathbf{X}^T \mathbf{W}^{(t)} \left[ \mathbf{X} \beta^{(t)} + s(\mathbf{W}^{(t)})^{-1} (\mathbf{y} - \mathbf{p}^{(t)}) \right] \\ # &=& (\mathbf{X}^T \mathbf{W}^{(t)} \mathbf{X})^{-1} \mathbf{X}^T \mathbf{W}^{(t)} \mathbf{z}^{(t)}, # \end{eqnarray*} # $$ # where # $$ # \mathbf{z}^{(t)} = \mathbf{X} \beta^{(t)} + s(\mathbf{W}^{(t)})^{-1} (\mathbf{y} - \mathbf{p}^{(t)}) # $$ # are the working responses. A Newton's iteration is equivalent to solving a weighed least squares problem $\sum_{i=1}^n w_i (z_i - \mathbf{x}_i^T \beta)^2$. Thus the name **IRWLS (iteratively re-weighted least squares)**. # ### GLM # # Let's consider the more general class of generalized linear models (GLM). 
# # # | Family | Canonical Link | Variance Function | # |------------------|-------------------------------|-------------------| # | Normal | $\eta=\mu$ | 1 | # | Poisson | $\eta=\log \mu$ | $\mu$ | # | Binomial | $\eta=\log \left( \frac{\mu}{1 - \mu} \right)$ | $\mu (1 - \mu)$ | # | Gamma | $\eta = \mu^{-1}$ | $\mu^2$ | # | Inverse Gaussian | $\eta = \mu^{-2}$ | $\mu^3$ | # # * $Y$ belongs to an exponential family with density # $$ # p(y|\theta,\phi) = \exp \left\{ \frac{y\theta - b(\theta)}{a(\phi)} + c(y,\phi) \right\}. # $$ # * $\theta$: natural parameter. # * $\phi>0$: dispersion parameter. # GLM relates the mean $\mu = \mathbf{E}(Y|\mathbf{x})$ via a strictly increasing link function # $$ # g(\mu) = \eta = \mathbf{x}^T \beta, \quad \mu = g^{-1}(\eta) # $$ # # * Score, Hessian, information # # \begin{eqnarray*} # \nabla L_n(\beta) &=& \sum_{i=1}^n \frac{(y_i-\mu_i) \mu_i'(\eta_i)}{\sigma_i^2} \mathbf{x}_i \\ # \,- \nabla^2 L_n(\boldsymbol{\beta}) &=& \sum_{i=1}^n \frac{[\mu_i'(\eta_i)]^2}{\sigma_i^2} \mathbf{x}_i \mathbf{x}_i^T - \sum_{i=1}^n \frac{(y_i - \mu_i) \mu_i''(\eta_i)}{\sigma_i^2} \mathbf{x}_i \mathbf{x}_i^T \\ # & & + \sum_{i=1}^n \frac{(y_i - \mu_i) [\mu_i'(\eta_i)]^2 (d \sigma_i^{2} / d\mu_i)}{\sigma_i^4} \mathbf{x}_i \mathbf{x}_i^T \\ # \mathbf{FIM}_n(\beta) &=& \mathbf{E} [- \nabla^2 L_n(\beta)] = \sum_{i=1}^n \frac{[\mu_i'(\eta_i)]^2}{\sigma_i^2} \mathbf{x}_i \mathbf{x}_i^T = \mathbf{X}^T \mathbf{W} \mathbf{X}. # \end{eqnarray*} # # * Fisher scoring method # $$ # \beta^{(t+1)} = \beta^{(t)} + s [\mathbf{FIM}(\beta^{(t)})]^{-1} \nabla L_n(\beta^{(t)}) # $$ # IRWLS with weights $w_i = [\mu_i(\eta_i)]^2/\sigma_i^2$ and some working responses $z_i$. # # * For **canonical link**, $\theta = \eta$, the second term of Hessian vanishes and Hessian coincides with Fisher information matrix. Convex problem 😄 # $$ # \text{Fisher's scoring == Newton's method}. 
# $$
#
# * Non-canonical link, non-convex problem 😞
# $$
# \text{Fisher's scoring algorithm} \ne \text{Newton's method}.
# $$
# Example: Probit regression (binary response with probit link).
# $$
# \begin{eqnarray*}
# y_i &\sim& \text{Bernoulli}(p_i) \\
# p_i &=& \Phi(\mathbf{x}_i^T \beta) \\
# \eta_i &=& \mathbf{x}_i^T \beta = \Phi^{-1}(p_i).
# \end{eqnarray*}
# $$
# where $\Phi(\cdot)$ is the cdf of a standard normal.
#
# * Julia, R and Matlab implement the Fisher scoring method, aka IRWLS, for GLMs.
# * [GLM.jl](https://github.com/JuliaStats/GLM.jl) package.

# ## Nonlinear regression - Gauss-Newton method (KL 14.4-14.6)
#
# * Now we finally get to the problem Gauss faced in 1800!
# Relocate Ceres by fitting 41 observations to a 6-parameter (nonlinear) orbit.
#
# * Nonlinear least squares (curve fitting):
# $$
# \text{minimize} \,\, f(\beta) = \frac{1}{2} \sum_{i=1}^n [y_i - \mu_i(\mathbf{x}_i, \beta)]^2
# $$
# For example, $y_i =$ dry weight of onion and $x_i=$ growth time, and we want to fit a 3-parameter growth curve
# $$
# \mu(x, \beta_1,\beta_2,\beta_3) = \frac{\beta_3}{1 + e^{-\beta_1 - \beta_2 x}}.
# $$
#
# <img src="https://cdn.xlstat.com/img/tutorials/nlin5.gif" width="300" align="center"/>
#
# * "Score" and "information matrices"
# $$
# \begin{eqnarray*}
# \nabla f(\beta) &=& - \sum_{i=1}^n [y_i - \mu_i(\beta)] \nabla \mu_i(\beta) \\
# \nabla^2 f(\beta) &=& \sum_{i=1}^n \nabla \mu_i(\beta) \nabla \mu_i(\beta)^T - \sum_{i=1}^n [y_i - \mu_i(\beta)] \nabla^2 \mu_i(\beta) \\
# \mathbf{FIM}(\beta) &=& \sum_{i=1}^n \nabla \mu_i(\beta) \nabla \mu_i(\beta)^T = \mathbf{J}(\beta)^T \mathbf{J}(\beta),
# \end{eqnarray*}
# $$
# where $\mathbf{J}(\beta)^T = [\nabla \mu_1(\beta), \ldots, \nabla \mu_n(\beta)] \in \mathbb{R}^{p \times n}$.
#
# * **Gauss-Newton** (= "Fisher's scoring algorithm") uses $\mathbf{FIM}(\beta)$, which is always psd.
# $$ # \boxed{ \beta^{(t+1)} = \beta^{(t)} + s [\mathbf{FIM} (\beta^{(t)})]^{-1} \nabla L(\beta^{(t)}) } # $$ # # * **Levenberg-Marquardt** method, aka **damped least squares algorithm (DLS)**, adds a ridge term to the approximate Hessian # $$ # \boxed{ \beta^{(t+1)} = \beta^{(t)} + s [\mathbf{FIM} (\beta^{(t)}) + \tau \mathbf{I}_p]^{-1} \nabla L(\beta^{(t)}) } # $$ # bridging between Gauss-Newton and steepest descent. # # * Other approximation to Hessians: nonlinear GLMs. # See KL 14.4 for examples.
slides/23-newton/newton.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Project 2 : Reacher Project (Continuous Control)
#
# ## Project Report
#
# This is the report for the second project in the Udacity Deep Reinforcement Learning Nanodegree. The purpose of this project is to let us learn to use a policy-based deep reinforcement learning algorithm to train an agent/agents. In this environment, a double-jointed arm can move to target locations. A reward of +0.1 is provided for each step that the agent's hand is in the goal location. Thus, **the goal of the agent is to maintain its position at the target location for as many time steps as possible.**
#
#
#
# I chose to solve this project with the DDPG algorithm. I am using the code for DDPG from https://github.com/udacity/deep-reinforcement-learning/tree/master/ddpg-pendulum as a starting point.
#
# I refer to https://github.com/tlan95/Udacity-DRL-nanodegree-project2-Continuous-Control for the methods to refine the DDPG algorithm hyperparameters.
#
# The code for fine-tuning the hyperparameters and agent training is in **'Continuous_Control.ipynb'**. In that notebook, we run several combinations of hyperparameters for 100 episodes and observe their performance. Then the agent is trained with one set of hyperparameters. The final results show that the agent is able to obtain an average score of 30 over 100 episodes.
#
# In this report, we will see the performance of the trained model. To do so, we will load the agent with pretrained parameters and let the agent play with the environment. We will then check the average score over 100 episodes of play.

# ## Environment details
#
# The environment is based on [Unity ML-agents](https://github.com/Unity-Technologies/ml-agents).
The project environment provided by Udacity is similar to the [Reacher](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Learning-Environment-Examples.md#reacher) environment on the Unity ML-Agents GitHub page.
#
# The observation space consists of 33 variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector should be a number between -1 and 1.
#
#
# ### Code implementation
#
# The code for this project is built upon the example from https://github.com/udacity/deep-reinforcement-learning/tree/master/ddpg-pendulum. The code includes:
#
# - 'Continuous_Control.ipynb': main code for fine-tuning and training the model;
#
# - 'ddpg_agent.py': contains the class for the DDPG agent and the class for the replay buffer. The construction of these two classes is very similar to DQN. The major difference is that the DDPG agent acts according to the actor_local network, and the actor_local network is updated alongside the critic_local network when the 'learn()' method is called, while in DQN, the agent acts according to the Q_network and only the Q_network is updated.
#
# - 'model.py' : contains the neural networks for the actor network and critic network. One batch normalization layer is added after the first linear layer in both the actor and critic networks.
#
#
# ### DDPG parameters and results
#
# #### Methodology
#
# I refer to the method in https://github.com/tlan95/Udacity-DRL-nanodegree-project2-Continuous-Control to refine the DDPG algorithm hyperparameters. One inspiration from this method is that the hyperparameters are identified and can be directly modified when calling the "ddpg" function.
# The set of hyper parameters of the DDPG algorithm copied from the Udacity DRL includes: # ``` # max_t=1000, # maximum allowed time step in each episode # random_seed=2, # update_every=1,# time steps before neural network update from minibatch # actor_fc1_units=400, # number of neurons in first layer in actor network # actor_fc2_units=300, # number of neurons in second layer in actor network # critic_fcs1_units=400, # number of neurons in first layer in critic network # critic_fc2_units=300, # number of neurons in second layer in critic network # # gamma=0.99, #discount rate # tau=1e-3, #soft update rate # # lr_actor=1e-4, # actor network learning rate # lr_critic=1e-3, # critic network learning rate # weight_decay=0, # neural network weight decay rate # # mu=0., # Ornstein-Uhlenbeck noise parameter # theta=0.15, # Ornstein-Uhlenbeck noise parameter # sigma=0.2 # Ornstein-Uhlenbeck noise parameter # ``` # The following hyper parameters have been changed: # # - It has been found that max_t should be set to 1000 because Unity Environment will only consider the game "done" after 1000 time steps. # - actor_fc1_units,actor_fc2_units,critic_fcs1_units,critic_fc2_units. Using smaller neural network can help the agent train better and faster; # - lr_actor and lr_critic. Learning rate can be tweaked to achieve better results; # - sigma. sigma can be reduced to achieve better training result. # - update_every defines after how many time steps the actor and critic network are updated. Using a value larger than 1 can help reduce training time and episodes. # # As shown in the notebook, after tweaking around 7 sets of hyperparameters, I settle down on a final set of hyperparameters: # ``` # max_t=1000, # update_every=4, # actor_fc1_units=200, # actor_fc2_units=150, # critic_fcs1_units=200, # critic_fc2_units=150, # gamma=0.99, # tau=1e-3, # lr_actor=5e-4, # lr_critic=5e-4, # weight_decay=0, # mu=0., # theta=0.15, # sigma=0.1 # ``` # The scores during training is plot below. 
It can be observed that it takes about 500 episodes to train the agent to obtain an average score over 30.
# ![title](imag/1agent_training.png)
#
#
# We will use the trained agent to play a game for 100 episodes, and show that it can achieve an average score of 30 over 100 episodes.

# NOTE(review): the numpy and unityagents imports below are duplicated; harmless, but
# the repeats could be removed.
from unityagents import UnityEnvironment
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import deque
import numpy as np
from unityagents import UnityEnvironment

# Launch the local Unity Reacher environment (single-agent build).
env = UnityEnvironment(file_name="Reacher.app")

# get the default brain (the agent/controller registered in the Unity scene)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]

# load the weights from file
def let_agent_play():
    """Load pretrained actor/critic weights and run the agent for 100 episodes.

    Relies on the module-level globals ``agent``, ``env`` and ``brain_name``,
    and on 'checkpoint_actor.pth'/'checkpoint_critic.pth' being present in the
    working directory. Returns the list of per-episode scores.
    """
    agent.actor_local.load_state_dict(torch.load('checkpoint_actor.pth'))
    agent.critic_local.load_state_dict(torch.load('checkpoint_critic.pth'))
    max_t=1000                              # Unity only signals "done" after 1000 steps
    play_episodes=100
    play_scores = []                        # list containing scores from each episode
    play_scores_window = deque(maxlen=100)  # last 100 scores
    for i_episode in range(1, play_episodes+1):
        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations
        agent.reset()                       # reset the exploration noise process
        score = np.zeros(1)
        for t in range(max_t):
            actions = agent.act(states)
            env_info = env.step(actions)[brain_name]    # send all actions to the environment
            next_states = env_info.vector_observations  # get next state (for each agent)
            rewards = env_info.rewards                  # get reward (for each agent)
            dones = env_info.local_done                 # see if episode finished
            score += rewards                            # update the score (for each agent)
            states = next_states                        # roll over states to next time step
            if np.any(dones):
                break
        play_scores_window.append(np.mean(score))       # save most recent score
        play_scores.append(np.mean(score))              # save most recent score
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(play_scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(play_scores_window)))
    return play_scores

# plot the scores
def plot_dqn(scores):
    """Plot the per-episode scores against the episode index."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.plot(np.arange(len(scores)), scores)
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.show()

from ddpg_agent import Agent

# Final hyperparameter set selected in Continuous_Control.ipynb (state/action
# sizes match the Reacher observation/action spaces).
agent = Agent(state_size=33, action_size=4, random_seed=2,update_every=4,\
              actor_fc1_units=200, actor_fc2_units=150,\
              critic_fcs1_units=200, critic_fc2_units=150,\
              gamma=0.99, tau=1e-3,lr_actor=5e-4, lr_critic=5e-4, weight_decay=0,\
              mu=0., theta=0.15, sigma=0.1)

play_scores=let_agent_play()

plot_dqn(play_scores)

# ### Observation of the 20 agent version
# As you may notice, the code in **'Continuous_Control.ipynb'** is generalized for both single-agent and multi-agent. The same notebook and accompanying **"ddpg_agent.py" "model.py"** have been used to train the 20-agent version as well. The parameters for the 20-agent version are the same as the single-agent version, except that update_every=20.
# The training result is shown below. Compared to the single-agent version, the 20-agent training is more efficient - requiring fewer episodes to train.
# ![title](imag/20agent_training.png)
#
# My observation of the 20-agent version is that in fact we are only using one agent, but we can interact with 20 copies of the environment at the same time. During the training, after playing with parallel environments for a certain number (20 in this case) of time steps, the agent learns from 20 minibatches consecutively. The improvement in training speed is because we can collect experiences in parallel, reducing correlations among experiences.
#
# ### Ideas for future work
#
# As discussed in the Udacity instructions, the next step is to solve the second version and adopt algorithms like PPO, A3C, and D4PG that use multiple (non-interacting, parallel) copies of the same agent to distribute the task of gathering experience.

env.close()
Report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build a correlation heatmap of the Area Method of Moments (AMOM) features
# stored in ``corr_matrix_pd.csv``.

import os
os.chdir("M:")  # data lives on the M: drive

import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
# %matplotlib notebook

# Feature matrix; first CSV column is the row index.
corr_matrix_pd = pd.read_csv("corr_matrix_pd.csv", index_col=0)
corr_matrix_pd.head()

# Drop the track-identifier column -- it is not a feature.
# (``columns=`` keyword: the positional ``axis`` argument to DataFrame.drop
# was deprecated in pandas 1.0 and removed in pandas 2.0.)
corr_matrix_pd.drop(columns=['MSD_TRACKID'], inplace=True)

# +
# Shorten the long feature-name prefix so the heatmap labels stay readable.
# The pattern is a literal string, so str.replace is equivalent to re.sub here.
col_names = [name.replace('Area_Method_of_Moments_Overall', 'AMOM')
             for name in corr_matrix_pd.columns]
print(col_names)
# -

corr_matrix_pd.columns = col_names
corr_matrix_pd.columns

# Annotated correlation heatmap of all (renamed) features.
sns.set(font_scale = 1.2)
plt.figure(figsize=(16,12))
ax = sns.heatmap(corr_matrix_pd.corr(), xticklabels=True, yticklabels=True, cmap="YlGnBu", annot=True, fmt=".2f")
ax.set_title('Correlation Heatmap of Area Method of Moments Features')
Code/Corr_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Removendo linhas duplicadas no Pandas # # import pandas as pd import numpy as np # + # read a dataset of movie reviwers into a Dataframe colunas = ['user_id', 'age', 'gender','occupation', 'zip_code'] usuarios = pd.read_table("Duplicadas.txt", sep='|', header=None, names=colunas) usuarios.head(10) # + # Verificando linhas e colunas usuarios.shape # + # Procurando por CEPS duplicados usuarios.zip_code.duplicated().head(10) # + # Como aprendemos, uma serie boolena pode ser convertida em 0 e 1 automaticamente ao utilizarmos métodos de agregação usuarios.zip_code.duplicated().sum() # + # verificando se uma linha inteira é duplicada usuarios.duplicated() # + # Contando as linhas duplicadas usuarios.duplicated().sum() # + # mantendo a primeira linha usuarios.loc[usuarios.duplicated(keep='first'), :] # - usuarios.loc[usuarios.duplicated(keep='last'), :] # + # Todas as linhas duplicadas usuarios.loc[usuarios.duplicated(keep=False), :] # + # Apagando as linhas duplicadas (inplace= False por default) Shape do Dataset que ficou usuarios.drop_duplicates(keep='first').shape # - usuarios.drop_duplicates(keep='last').shape usuarios.drop_duplicates(keep=False).shape # # Utilizando o maior arquivo # + # read a datasetof movie review into a Dataframe colunas = ['user_id','age','gender','occupation','zip_code'] usuarios = pd.read_table('espectadores.txt', sep='|', header=None, names=colunas, index_col='user_id') usuarios.head() # + # Verificando linhas e colunas usuarios.shape # - # # Encontrando linha duplicadas com duplicated() # + # Procurando ceps duplicados usuarios.zip_code.duplicated().head(20) # + # Como aprendemos, uma serie boolenana pode ser convertida em 0 e 1 automaticamente ao utilizarmos métodos de agregação. 
usuarios.zip_code.duplicated().sum() # + # Verificando se uma LINHA INTEIRA é duplicada] usuarios.duplicated().tail() # + # Contando linhas duplas usuarios.duplicated().sum() # - # ### Regras para o método duplicated() # # - keep='first' (default): Marca as linhas duplicadas como TRUE, Menos a primeira ocorrência. # - keep='last': Marcas as linhas duplicadas como FALSE, MENOS a última ocorrência. # - keep=False: Marca todas as duplicadas como TRUE. # + # Mantendo a primeira linha usuarios.loc[usuarios.duplicated(keep='first'), :] # + # Mantendo a ultima linha usuarios.loc[usuarios.duplicated(keep='last'), :] # + # Todas as linhas duplicadas usuarios.loc[usuarios.duplicated(keep=False), :] # - # # Apagando as linhas duplicadas com drop_duplicated() # + # Apagando as linhas duplicadas (inplace=False por padrão) usuarios.drop_duplicates(keep='first').shape # - usuarios.drop_duplicates(keep='last').shape usuarios.drop_duplicates(keep=False).shape # # Podemos considerar apenas algumas colunas para identificar duplicadas? # + # Nesse caso, queremos considerar uma linha duplicada somente se a idade e cep forem iguais usuarios.duplicated(subset=['age','zip_code']).sum() # - usuarios.drop_duplicates(subset=['age','zip_code']).shape
S16_A125 - Removendo duplicatas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:hodemulator]
#     language: python
#     name: conda-env-hodemulator-py
# ---

# My $w(\theta)$ emulator has been having lots of problems. I'm going to first test the actual mock calculations to see if I can find the problem.

# NOTE(review): this notebook uses Python 2 print statements (`print z`), so it
# must run under the Python 2 hodemulator environment named in the kernelspec.
from pearce.mocks import cat_dict
import numpy as np
from os import path

import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()

# Scale factor and corresponding redshift for the snapshot under test.
a = 1.0#0.81120
z = 1.0/a - 1.0
print z

# +
cosmo_params = {'simname':'chinchilla', 'Lbox':400.0, 'scale_factors':[a]}
cat = cat_dict[cosmo_params['simname']](**cosmo_params)#construct the specified catalog!

cat.load_catalog(a)
#halo_masses = cat.halocat.halo_table['halo_mvir']
# -

cat.load_model(a, 'redMagic')

# +
# HOD parameters for the mock population; assembly bias switched off.
params = cat.model.param_dict.copy()
params['mean_occupation_centrals_assembias_param1'] = 0.0
params['mean_occupation_satellites_assembias_param1'] = 0.0
params['logMmin'] = 12.089
params['sigma_logM'] = 0.33
params['f_c'] = 1.0
params['alpha'] = 1.1
params['logM1'] = 13.3
params['logM0'] = params['logMmin']

print params
# -

cat.populate(params)

# Angular bins (degrees, presumably -- TODO confirm units expected by calc_wt).
theta_bins = np.logspace(np.log10(0.004), 0, 24)#/60
tpoints = (theta_bins[1:]+theta_bins[:-1])/2

# + active=""
# plt.plot(ra,dec,'.',color='red')
# plt.xlim([-180,180])
# plt.ylim([-90,90])
# plt.ylabel(r'$\delta$ $[{\rm degrees}]$', fontsize=20)
# plt.xlabel(r'$\alpha$ $[{\rm degrees}]$', fontsize=20)
# plt.xticks(size=15)
# plt.yticks(size=15)
# plt.title('Mock catalog in angular coordinates', fontsize=20)
# -

# Reference calculation: pearce's own angular correlation function.
wt = cat.calc_wt(theta_bins, do_jackknife=False)

plt.plot(tpoints,wt)
plt.plot(tpoints, 0.04*np.power(tpoints, -0.9))
#plt.yscale('log')
plt.loglog()
plt.xlim([5e-3, 1.1])
#plt.ylim([1e-4, 2.0])

tpoints

# Ratio against a rough power law, to eyeball the slope/amplitude.
plt.plot(tpoints,wt/(0.02*np.power(tpoints,-0.8)))
plt.xscale('log')
#plt.loglog()
plt.xlim([1e-2, 1.0])
#plt.ylim([1e-4, 2.0])

# Load the measured redMaGiC w(theta) and number density for comparison.
zbin = 1
redmagic_wt = np.loadtxt('/u/ki/swmclau2/Git/pearce/bin/mcmc/buzzard2_wt_%d%d.npy'%(zbin, zbin))
redmagic_nd = np.loadtxt('/u/ki/swmclau2/Git/pearce/bin/mcmc/buzzard2_nd_%d%d.npy'%(zbin, zbin))

# + active=""
# redmagic_nd[0]
# -

print cat.calc_analytic_nd()

redmagic_wt.shape, tpoints.shape

# + active=""
# plt.plot(tpoints,redmagic_wt)
# #plt.yscale('log')
# plt.loglog()
# plt.xlim([2.25/60, 275/60])
# plt.ylim([1e-4, 2.0])
# -

# +
# Independent cross-check with halotools: observer in the box corner.
from halotools.mock_observables import * # i'm importing so much this is just easier

pos = np.vstack([cat.model.mock.galaxy_table[c] for c in ['x', 'y', 'z']]).T
vels = np.vstack([cat.model.mock.galaxy_table[c] for c in ['vx', 'vy', 'vz']]).T

# TODO is the model cosmo same as the one attached to the cat?
ra, dec, z = mock_survey.ra_dec_z(pos * cat.h, vels, cosmo=cat.cosmology)
ang_pos = np.vstack((np.degrees(ra), np.degrees(dec))).T

ra = np.degrees(ra)
dec = np.degrees(dec)
# -

plt.plot(ra,dec,'.',color='red')
plt.xlim([-180,180])
plt.ylim([-90,90])
plt.ylabel(r'$\delta$ $[{\rm degrees}]$', fontsize=20)
plt.xlabel(r'$\alpha$ $[{\rm degrees}]$', fontsize=20)
plt.xticks(size=15)
plt.yticks(size=15)
plt.title('Mock catalog in angular coordinates', fontsize=20)

# + active=""
# plt.scatter(ang_pos[:10000,0], ang_pos[:10000,1])

# +
# Uniform randoms in the box, projected to the same angular coordinates.
n_rands = 5
rand_pos = np.random.random((pos.shape[0] * n_rands, 3)) * cat.Lbox#*cat.h
rand_vels = np.zeros((pos.shape[0] * n_rands, 3))

rand_ra, rand_dec, rand_z = mock_survey.ra_dec_z(rand_pos * cat.h, rand_vels, cosmo=cat.cosmology)
rand_ang_pos = np.vstack((np.degrees(rand_ra), np.degrees(rand_dec))).T
# -

wt_all = angular_tpcf(ang_pos, theta_bins,randoms = rand_ang_pos, num_threads=1)

print wt_all

print wt

wt_all

theta_bins

pos

cat.cosmology

# +
# Second cross-check: recenter the box so the observer sits at the center,
# and keep only a complete sphere so no randoms are needed.
pos = np.vstack([cat.model.mock.galaxy_table[c] for c in ['x', 'y', 'z']]).T
coords = pos- cat.model.mock.Lbox/2.0

ra_init, dec_init, z = mock_survey.ra_dec_z(coords*cat.h, vels, cosmo=cat.cosmology)

#keep a complete spherical volume
r = np.sqrt(coords[:,0]**2 + coords[:,1]**2 + coords[:,2]**2)
keep = r<cat.Lbox/2.0

ra = np.degrees(ra_init[keep])
dec = np.degrees(dec_init[keep])

angular_coords = np.vstack((ra,dec)).T
# -

w_theta = angular_tpcf(angular_coords, theta_bins, num_threads='max')

theta_bins

w_theta

plt.plot(ra,dec,'.',color='blue', ms = 2.0)
plt.xlim([-180,180])
plt.ylim([-90,90])
plt.ylabel(r'$\delta$ $[{\rm degrees}]$', fontsize=20)
plt.xlabel(r'$\alpha$ $[{\rm degrees}]$', fontsize=20)
plt.xticks(size=15)
plt.yticks(size=15)
plt.title('Mock catalog in angular coordinates', fontsize=20)

# Compare the two observer placements.
plt.plot(tpoints,wt_all, label = 'Observer in corner')
plt.plot(tpoints, w_theta, label = 'Observer in center')
#plt.xscale('log')
plt.loglog()
plt.xlim([1e-2, 1.0])
#plt.ylim([1e-4, 2.0])
plt.legend(loc='best', fontsize = 15)
plt.xlabel(r'$\theta$ [Degrees]')
plt.ylabel(r'$w(\theta)$')
plt.title('z = %.2f'%(1.0/a - 1.0))

# +
# NOTE(review): this cell overwrites ra/dec/z from both placements in sequence;
# only the second assignment survives.
ra_init, dec_init, z = mock_survey.ra_dec_z(coords*cat.h, vels, cosmo=cat.cosmology)

ra, dec, z = mock_survey.ra_dec_z(pos * cat.h, vels, cosmo=cat.cosmology)
notebooks/Test wt Calculation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Sourcing fire data

# This notebook stitches together downloaded jsons from fire.ca.gov to create several dataframes used in the main notebook.
#
# The underlying jsons are not provided, but one can download it for themselves from ca.fire.gov and run this notebook to create updated files.

# +
import json # for raw fire data

import numpy as np # for nan
import pandas as pd # to convert json data to dataframe
import pyarrow.feather as feather # lightweight export of dataframe
from scipy.spatial.distance import cdist

# +
# Loading Data
path_to_read_data = "./data" # this data can be downloaded from ca.fire.gov; replace path as appropriate
path_to_write_data = "./data"

# Opening JSON: year range covered by the downloaded files (end is exclusive).
start = 2016
end = 2021

# +
# initiate data with first year
with open(f"{path_to_read_data}/calfire-{start}.json") as f:
    start_data = json.load(f)

annual_data = start_data[f"firedata{start}"]
print(f"Number of fires in {start} : {len(annual_data)}")
# -

# add second year through final year
for year in range(start + 1, end):
    with open(f"{path_to_read_data}/calfire-{year}.json") as f:
        additional_data = json.load(f)
    new_data = additional_data[f"firedata{year}"]
    print(f"Number of fires in {year} : {len(new_data)}")
    for new_datum in new_data:
        annual_data.append(new_datum)

# clean and convert data to dataframe
df = pd.DataFrame.from_dict(annual_data)
df = df.loc[df["AcresBurnedDisplay"] != 0] # removing trivial data

# Creating a dataframe hosting the relationship between each solar data station to the wildfire locations and a dataframe with fire lat/lons

# +
# create df of fire+lat+lon
fire_latlon = df.filter(["Name", "Latitude", "Longitude", "StartedDate"], axis=1)
fire_latlon["LatLon"] = [
    (x, y) for x, y in zip(fire_latlon["Latitude"], fire_latlon["Longitude"])
]
fire_latlon["When"] = pd.to_datetime(fire_latlon["StartedDate"]).dt.strftime("%Y-%m-%d")
del fire_latlon["StartedDate"]
fire_latlon.head()
# -

# Prefix fire names with the start date to make them unique across years.
fire_latlon["Name"] = fire_latlon["When"].astype(str) + fire_latlon["Name"]
del fire_latlon["When"]

# +
# pull in station lat+lon
solar_df = pd.read_feather(
    "s3://data.atoti.io/notebooks/ca-solar/nsrdb_station_lat_lon.feather"
)
solar_df["LatLon"] = [
    (x, y) for x, y in zip(solar_df["Latitude"], solar_df["Longitude"])
]
solar_df.head()

# +
# create matrix of distances: rows = stations, columns = fires.
# NOTE(review): cdist defaults to Euclidean distance in (lat, lon) degree
# space, not geodesic distance -- confirm this is intended downstream.
fire_station_dist = cdist(list(solar_df["LatLon"]), list(fire_latlon["LatLon"]))
fs_dist = pd.DataFrame(data=fire_station_dist, columns=fire_latlon["Name"])
fs_dist.head()
# -

dist = pd.concat([solar_df["Station"], fs_dist], axis=1)
dist.head()

# +
# Long format: one (Station, Fire, Distance) row per pair.
dist_df = pd.melt(
    dist, id_vars="Station", ignore_index=False, var_name="Fire", value_name="Distance"
)
dist_df.head()
# -

fire_loc = fire_latlon.drop("LatLon", axis=1)
fire_loc.rename(
    columns={
        "Name": "Fire",
    },
    inplace=True,
)

feather.write_feather(
    dist_df, f"{path_to_write_data}/distance.feather", compression="zstd"
)
feather.write_feather(
    fire_loc, f"{path_to_write_data}/fire_loc.feather", compression="zstd"
)

# Creating a dataframe holding the key fire data

df.head()

# +
# Per-fire summary: unique name, acres burned, start month, start/end dates.
# NOTE(review): UpdatedDate is used as a proxy for the fire's end date.
fire_data = df.filter(
    ["Name", "AcresBurnedDisplay", "StartedDate", "UpdatedDate"], axis=1
)
fire_data["StartedMonth"] = pd.to_datetime(fire_data["StartedDate"]).dt.strftime("%m")
fire_data["When"] = pd.to_datetime(fire_data["StartedDate"]).dt.strftime("%Y-%m-%d")
fire_data["UpdatedDate"] = pd.to_datetime(fire_data["UpdatedDate"])
fire_data["UpdatedDate"] = [d.date() for d in fire_data["UpdatedDate"]]
fire_data["Name"] = fire_data["When"].astype(str) + fire_data["Name"]
del fire_data["When"]
fire_data.rename(
    columns={
        "UpdatedDate": "EndedDate",
        "AcresBurnedDisplay": "AcresBurned",
        "Name": "Fire",
    },
    inplace=True,
)
fire_data.head()
# -

feather.write_feather(
    fire_data, f"{path_to_write_data}/fire_data.feather", compression="zstd"
)
notebooks/ca-solar/02-fire-data-sourcing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 📈 Linear Regression with Python
#
# > Linear Regression is the simplest algorithm in machine learning, it can be trained in different ways. In this notebook we will cover the following linear algorithms:
#
# > 1. Linear Regression
# > 2. Robust Regression
# > 3. Ridge Regression
# > 4. LASSO Regression
# > 5. Elastic Net
# > 6. Polynomial Regression
# > 7. Stochastic Gradient Descent
# > 8. Artificial Neural Networks
#
# # 💾 Data
#
# > We are going to use the `USA_Housing` dataset. Since house price is a continuous variable, this is a regression problem. The data contains the following columns:
#
# > * '`Avg. Area Income`': Avg. Income of residents of the city house is located in.
# > * '`Avg. Area House Age`': Avg Age of Houses in same city
# > * '`Avg. Area Number of Rooms`': Avg Number of Rooms for Houses in same city
# > * '`Avg. Area Number of Bedrooms`': Avg Number of Bedrooms for Houses in same city
# > * '`Area Population`': Population of city house is located in
# > * '`Price`': Price that the house sold at
# > * '`Address`': Address for the house
#
# # 📤 Import Libraries

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

sns.set_style("whitegrid")
plt.style.use("fivethirtyeight")
# -

# ## 💾 Check out the Data

USAhousing = pd.read_csv('/kaggle/input/usa-housing/USA_Housing.csv')
USAhousing.head()

USAhousing.info()

USAhousing.describe()

USAhousing.columns

# # 📊 Exploratory Data Analysis (EDA)
#
# Let's create some simple plots to check out the data!

sns.pairplot(USAhousing)

sns.distplot(USAhousing['Price'])

sns.heatmap(USAhousing.corr(), annot=True)

# # 📈 Training a Linear Regression Model
#
# > Let's now begin to train our regression model! We will need to first split up our data into an X array that contains the features to train on, and a y array with the target variable, in this case the Price column. We will toss out the Address column because it only has text info that the linear regression model can't use.
#
# ## X and y arrays

X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
                'Avg. Area Number of Bedrooms', 'Area Population']]
y = USAhousing['Price']

# ## 🧱 Train Test Split
#
# Now let's split the data into a training set and a testing set. We will train our model on the training set and then use the test set to evaluate the model.

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# +
from sklearn import metrics
from sklearn.model_selection import cross_val_score

def cross_val(model):
    """Return the mean 10-fold cross-validation score of `model` on the full X, y."""
    pred = cross_val_score(model, X, y, cv=10)
    return pred.mean()

def print_evaluate(true, predicted):
    """Print MAE, MSE, RMSE and R^2 for the given true/predicted values."""
    mae = metrics.mean_absolute_error(true, predicted)
    mse = metrics.mean_squared_error(true, predicted)
    rmse = np.sqrt(metrics.mean_squared_error(true, predicted))
    r2_square = metrics.r2_score(true, predicted)
    print('MAE:', mae)
    print('MSE:', mse)
    print('RMSE:', rmse)
    print('R2 Square', r2_square)
    print('__________________________________')

def evaluate(true, predicted):
    """Return the tuple (MAE, MSE, RMSE, R^2) for the given true/predicted values."""
    mae = metrics.mean_absolute_error(true, predicted)
    mse = metrics.mean_squared_error(true, predicted)
    rmse = np.sqrt(metrics.mean_squared_error(true, predicted))
    r2_square = metrics.r2_score(true, predicted)
    return mae, mse, rmse, r2_square
# -

# # 📦 Preparing Data For Linear Regression
# > Linear regression has been studied at great length, and there is a lot of literature on how your data must be structured to make best use of the model.
#
# > As such, there is a lot of sophistication when talking about these requirements and expectations which can be intimidating. In practice, you can use these rules more as rules of thumb when using Ordinary Least Squares Regression, the most common implementation of linear regression.
#
# > Try different preparations of your data using these heuristics and see what works best for your problem.
# - **Linear Assumption.** Linear regression assumes that the relationship between your input and output is linear. It does not support anything else. This may be obvious, but it is good to remember when you have a lot of attributes. You may need to transform data to make the relationship linear (e.g. log transform for an exponential relationship).
# - **Remove Noise.** Linear regression assumes that your input and output variables are not noisy. Consider using data cleaning operations that let you better expose and clarify the signal in your data. This is most important for the output variable and you want to remove outliers in the output variable (y) if possible.
# - **Remove Collinearity.** Linear regression will over-fit your data when you have highly correlated input variables. Consider calculating pairwise correlations for your input data and removing the most correlated.
# - **Gaussian Distributions.** Linear regression will make more reliable predictions if your input and output variables have a Gaussian distribution. You may get some benefit using transforms (e.g. log or BoxCox) on your variables to make their distribution more Gaussian looking.
# - **Rescale Inputs:** Linear regression will often make more reliable predictions if you rescale input variables using standardization or normalization.
# +
# Standardize the features (zero mean, unit variance); fit the scaler on the
# training split only, then apply the same transform to the test split.
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

pipeline = Pipeline([
    ('std_scalar', StandardScaler())
])

X_train = pipeline.fit_transform(X_train)
X_test = pipeline.transform(X_test)
# -

# # ✔️ Linear Regression

# +
from sklearn.linear_model import LinearRegression

# The `normalize` parameter of LinearRegression was deprecated in
# scikit-learn 0.24 and removed in 1.2, so `LinearRegression(normalize=True)`
# now raises. The inputs are already standardized by the StandardScaler
# pipeline above, so plain ordinary least squares is used here.
lin_reg = LinearRegression()
lin_reg.fit(X_train,y_train)
# -

# ## ✔️ Model Evaluation
#
# Let's evaluate the model by checking out its coefficients and how we can interpret them.

# print the intercept
print(lin_reg.intercept_)

# One fitted coefficient per feature (on the standardized inputs).
coeff_df = pd.DataFrame(lin_reg.coef_, X.columns, columns=['Coefficient'])
coeff_df

# > Interpreting the coefficients:
# - Holding all other features fixed, a 1 unit increase in **Avg. Area Income** is associated with an **increase of \$21.52**.
# - Holding all other features fixed, a 1 unit increase in **Avg. Area House Age** is associated with an **increase of \$164883.28**.
# - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Rooms** is associated with an **increase of \$122368.67**.
# - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Bedrooms** is associated with an **increase of \$2233.80**.
# - Holding all other features fixed, a 1 unit increase in **Area Population** is associated with an **increase of \$15.15**.
#
# Does this make sense? Probably not because I made up this data.

# ## ✔️ Predictions from our Model
#
# Let's grab predictions off our test set and see how well it did!
# Scatter of actual vs. predicted values: a tight diagonal means a good fit.
pred = lin_reg.predict(X_test)
plt.scatter(y_test, pred)

# **Residual Histogram**

# `sns.distplot` was deprecated in seaborn 0.11 and removed in 0.14;
# `histplot(..., kde=True)` is the modern equivalent.
sns.histplot(y_test - pred, bins=50, kde=True);

# ## ✔️ Regression Evaluation Metrics
#
#
# Here are three common evaluation metrics for regression problems:
#
# > - **Mean Absolute Error** (MAE) is the mean of the absolute value of the errors:
# $$\frac 1n\sum_{i=1}^n|y_i-\hat{y}_i|$$
#
# > - **Mean Squared Error** (MSE) is the mean of the squared errors:
# $$\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2$$
#
# > - **Root Mean Squared Error** (RMSE) is the square root of the mean of the squared errors:
# $$\sqrt{\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2}$$
#
# > 📌 Comparing these metrics:
# - **MAE** is the easiest to understand, because it's the average error.
# - **MSE** is more popular than MAE, because MSE "punishes" larger errors, which tends to be useful in the real world.
# - **RMSE** is even more popular than MSE, because RMSE is interpretable in the "y" units.
#
# > All of these are **loss functions**, because we want to minimize them.

# +
test_pred = lin_reg.predict(X_test)
train_pred = lin_reg.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

# Seed the comparison table with the baseline linear-regression scores.
results_df = pd.DataFrame(data=[["Linear Regression", *evaluate(y_test, test_pred) , cross_val(LinearRegression())]],
                          columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', "Cross Validation"])
results_df

# # ✔️ Robust Regression
#
# > Robust regression is a form of regression analysis designed to overcome some limitations of traditional parametric and non-parametric methods. Robust regression methods are designed to be not overly affected by violations of assumptions by the underlying data-generating process.
#
# > One instance in which robust estimation should be considered is when there is a strong suspicion of `heteroscedasticity`.
# # > A common situation in which robust estimation is used occurs when the data contain outliers. In the presence of outliers that do not come from the same data-generating process as the rest of the data, least squares estimation is inefficient and can be biased. Because the least squares predictions are dragged towards the outliers, and because the variance of the estimates is artificially inflated, the result is that outliers can be masked. (In many situations, including some areas of geostatistics and medical statistics, it is precisely the outliers that are of interest.) # ## Random Sample Consensus - RANSAC # # > Random sample consensus (`RANSAC`) is an iterative method to estimate parameters of a mathematical model from a set of observed data that contains outliers, when outliers are to be accorded no influence on the values of the estimates. Therefore, it also can be interpreted as an outlier detection method. # # > A basic assumption is that the data consists of "inliers", i.e., data whose distribution can be explained by some set of model parameters, though may be subject to noise, and "outliers" which are data that do not fit the model. The outliers can come, for example, from extreme values of the noise or from erroneous measurements or incorrect hypotheses about the interpretation of data. RANSAC also assumes that, given a (usually small) set of inliers, there exists a procedure which can estimate the parameters of a model that optimally explains or fits this data. 
# +
from sklearn.linear_model import RANSACRegressor

# `base_estimator` was renamed to `estimator` in scikit-learn 1.1 and the old
# name was removed in 1.3.
model = RANSACRegressor(estimator=LinearRegression(), max_trials=100)
model.fit(X_train, y_train)

test_pred = model.predict(X_test)
train_pred = model.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('====================================')
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["Robust Regression", *evaluate(y_test, test_pred) , cross_val(RANSACRegressor())]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', "Cross Validation"])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Ridge Regression
#
# > Source: [scikit-learn](http://scikit-learn.org/stable/modules/linear_model.html#ridge-regression)
#
# > Ridge regression addresses some of the problems of **Ordinary Least Squares** by imposing a penalty on the size of coefficients. The ridge coefficients minimize a penalized residual sum of squares,
#
# $$\min_{w}\big|\big|Xw-y\big|\big|^2_2+\alpha\big|\big|w\big|\big|^2_2$$
#
# > $\alpha>=0$ is a complexity parameter that controls the amount of shrinkage: the larger the value of $\alpha$, the greater the amount of shrinkage and thus the coefficients become more robust to collinearity.
#
# > Ridge regression is an L2 penalized model. Add the squared sum of the weights to the least-squares cost function.
# ***

# +
from sklearn.linear_model import Ridge

model = Ridge(alpha=100, solver='cholesky', tol=0.0001, random_state=42)
model.fit(X_train, y_train)

# (A redundant `pred = model.predict(X_test)` duplicate of `test_pred` below
# was removed — it was never read.)
test_pred = model.predict(X_test)
train_pred = model.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('====================================')
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["Ridge Regression", *evaluate(y_test, test_pred) , cross_val(Ridge())]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', "Cross Validation"])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ LASSO Regression
#
# > A linear model that estimates sparse coefficients.
#
# > Mathematically, it consists of a linear model trained with $\ell_1$ prior as regularizer. The objective function to minimize is:
#
# $$\min_{w}\frac{1}{2n_{samples}} \big|\big|Xw - y\big|\big|_2^2 + \alpha \big|\big|w\big|\big|_1$$
#
# > The lasso estimate thus solves the minimization of the least-squares penalty with $\alpha \big|\big|w\big|\big|_1$ added, where $\alpha$ is a constant and $\big|\big|w\big|\big|_1$ is the $\ell_1-norm$ of the parameter vector.
# ***

# +
from sklearn.linear_model import Lasso

model = Lasso(alpha=0.1,
              precompute=True,
#               warm_start=True,
              positive=True,
              selection='random',
              random_state=42)
model.fit(X_train, y_train)

test_pred = model.predict(X_test)
train_pred = model.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('====================================')
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["Lasso Regression", *evaluate(y_test, test_pred) , cross_val(Lasso())]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', "Cross Validation"])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Elastic Net
#
# > A linear regression model trained with L1 and L2 prior as regularizer.
#
# > This combination allows for learning a sparse model where few of the weights are non-zero like Lasso, while still maintaining the regularization properties of Ridge.
#
# > Elastic-net is useful when there are multiple features which are correlated with one another. Lasso is likely to pick one of these at random, while elastic-net is likely to pick both.
#
# > A practical advantage of trading-off between Lasso and Ridge is it allows Elastic-Net to inherit some of Ridge’s stability under rotation.
#
# > The objective function to minimize is in this case
#
# $$\min_{w}{\frac{1}{2n_{samples}} \big|\big|X w - y\big|\big|_2 ^ 2 + \alpha \rho \big|\big|w\big|\big|_1 +
# \frac{\alpha(1-\rho)}{2} \big|\big|w\big|\big|_2 ^ 2}$$

# ***

# +
from sklearn.linear_model import ElasticNet

# l1_ratio close to 1 makes the penalty mostly L1 (Lasso-like).
model = ElasticNet(alpha=0.1, l1_ratio=0.9, selection='random', random_state=42)
model.fit(X_train, y_train)

test_pred = model.predict(X_test)
train_pred = model.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('====================================')
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["Elastic Net Regression", *evaluate(y_test, test_pred) , cross_val(ElasticNet())]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', "Cross Validation"])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Polynomial Regression
# > Source: [scikit-learn](http://scikit-learn.org/stable/modules/linear_model.html#polynomial-regression-extending-linear-models-with-basis-functions)
#
# ***
#
# > One common pattern within machine learning is to use linear models trained on nonlinear functions of the data. This approach maintains the generally fast performance of linear methods, while allowing them to fit a much wider range of data.
#
# > For example, a simple linear regression can be extended by constructing polynomial features from the coefficients.
In the standard linear regression case, you might have a model that looks like this for two-dimensional data: # # $$\hat{y}(w, x) = w_0 + w_1 x_1 + w_2 x_2$$ # # > If we want to fit a paraboloid to the data instead of a plane, we can combine the features in second-order polynomials, so that the model looks like this: # # $$\hat{y}(w, x) = w_0 + w_1 x_1 + w_2 x_2 + w_3 x_1 x_2 + w_4 x_1^2 + w_5 x_2^2$$ # # > The (sometimes surprising) observation is that this is still a linear model: to see this, imagine creating a new variable # # $$z = [x_1, x_2, x_1 x_2, x_1^2, x_2^2]$$ # # > With this re-labeling of the data, our problem can be written # # $$\hat{y}(w, x) = w_0 + w_1 z_1 + w_2 z_2 + w_3 z_3 + w_4 z_4 + w_5 z_5$$ # # > We see that the resulting polynomial regression is in the same class of linear models we’d considered above (i.e. the model is linear in w) and can be solved by the same techniques. By considering linear fits within a higher-dimensional space built with these basis functions, the model has the flexibility to fit a much broader range of data. 
# ***

# +
from sklearn.preprocessing import PolynomialFeatures

# Expand the standardized features with all degree-2 terms, then fit an
# ordinary linear model on the expanded basis.
poly_reg = PolynomialFeatures(degree=2)

X_train_2_d = poly_reg.fit_transform(X_train)
X_test_2_d = poly_reg.transform(X_test)

# `normalize` was removed from LinearRegression in scikit-learn 1.2; the
# inputs are already standardized, so the plain estimator is equivalent.
lin_reg = LinearRegression()
lin_reg.fit(X_train_2_d, y_train)

test_pred = lin_reg.predict(X_test_2_d)
train_pred = lin_reg.predict(X_train_2_d)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('====================================')
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

# Typo fixed in the model label ("Polynomail" -> "Polynomial").
results_df_2 = pd.DataFrame(data=[["Polynomial Regression", *evaluate(y_test, test_pred), 0]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', 'Cross Validation'])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Stochastic Gradient Descent
#
# > Gradient Descent is a very generic optimization algorithm capable of finding optimal solutions to a wide range of problems. The general idea of Gradient Sescent is to tweak parameters iteratively in order to minimize a cost function. Gradient Descent measures the local gradient of the error function with regards to the parameters vector, and it goes in the direction of descending gradient. Once the gradient is zero, you have reached a minimum.
# +
from sklearn.linear_model import SGDRegressor

sgd_reg = SGDRegressor(n_iter_no_change=250, penalty=None, eta0=0.0001, max_iter=100000)
sgd_reg.fit(X_train, y_train)

test_pred = sgd_reg.predict(X_test)
train_pred = sgd_reg.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('====================================')
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["Stochastic Gradient Descent", *evaluate(y_test, test_pred), 0]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', 'Cross Validation'])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Artificial Neural Network

# + _kg_hide-output=true
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, Activation, Dropout
from tensorflow.keras.optimizers import Adam

# Keras expects numpy arrays, not DataFrames/Series.
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

# Simple fully-connected regression network; single linear output unit.
model = Sequential()
model.add(Dense(X_train.shape[1], activation='relu'))
model.add(Dense(32, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1))

model.compile(optimizer=Adam(0.00001), loss='mse')

# NOTE(review): batch_size=1 for 100 epochs is very slow to train; kept
# unchanged to preserve the published results.
r = model.fit(X_train, y_train,
              validation_data=(X_test, y_test),
              batch_size=1,
              epochs=100)

# +
plt.figure(figsize=(10, 6))

plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()

# +
test_pred = model.predict(X_test)
train_pred = model.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

# Typo fixed in the model label ("Artficial" -> "Artificial").
results_df_2 = pd.DataFrame(data=[["Artificial Neural Network", *evaluate(y_test, test_pred), 0]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', 'Cross Validation'])
# `DataFrame.append` was removed in pandas 2.0; `pd.concat` is the replacement.
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Random Forest Regressor

# +
from sklearn.ensemble import RandomForestRegressor

rf_reg = RandomForestRegressor(n_estimators=1000)
rf_reg.fit(X_train, y_train)

test_pred = rf_reg.predict(X_test)
train_pred = rf_reg.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["Random Forest Regressor", *evaluate(y_test, test_pred), 0]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', 'Cross Validation'])
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # ✔️ Support Vector Machine

# +
from sklearn.svm import SVR

svm_reg = SVR(kernel='rbf', C=1000000, epsilon=0.001)
svm_reg.fit(X_train, y_train)

test_pred = svm_reg.predict(X_test)
train_pred = svm_reg.predict(X_train)

print('Test set evaluation:\n_____________________________________')
print_evaluate(y_test, test_pred)
print('Train set evaluation:\n_____________________________________')
print_evaluate(y_train, train_pred)
# -

results_df_2 = pd.DataFrame(data=[["SVM Regressor", *evaluate(y_test, test_pred), 0]],
                            columns=['Model', 'MAE', 'MSE', 'RMSE', 'R2 Square', 'Cross Validation'])
results_df = pd.concat([results_df, results_df_2], ignore_index=True)
results_df

# # 📊 Models Comparison

results_df.set_index('Model', inplace=True)
results_df['R2 Square'].plot(kind='barh', figsize=(12, 8))

# # 📝 Summary
# In this notebook you discovered the linear regression algorithm for machine learning.
#
# You covered a lot of ground including:
# > - The common linear regression models (Ridge, Lasso, ElasticNet, ...).
# > - The representation used by the model. # > - Learning algorithms used to estimate the coefficients in the model. # > - Rules of thumb to consider when preparing data for use with linear regression. # > - How to evaluate a linear regression model. # # # # 🔗 References: # - [Scikit-learn library](https://scikit-learn.org/stable/supervised_learning.html#supervised-learning) # - [Linear Regression for Machine Learning by <NAME> PhD](https://machinelearningmastery.com/linear-regression-for-machine-learning/)
day02/linear-regression-house-price.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:pyvizenv] *
#     language: python
#     name: conda-env-pyvizenv-py
# ---

# # San Francisco Rental Prices Dashboard
#
# In this notebook, you will compile the visualizations from the previous analysis into functions that can be used for a Panel dashboard.

# +
# imports
import panel as pn
pn.extension('plotly')
import plotly.express as px
import pandas as pd
import hvplot.pandas
import matplotlib.pyplot as plt
import os
from pathlib import Path
from dotenv import load_dotenv
from panel.interact import interact
# (a duplicate `import panel as pn` was removed)
# -

# Read the Mapbox API key
load_dotenv('test1.env')
map_box_api = os.getenv("mapbox")
px.set_mapbox_access_token(map_box_api)

# # Import Data

# +
# Import the necessary CSVs to Pandas DataFrames
file_path = Path("Data/sfo_neighborhoods_census_data.csv")
sfo_data = pd.read_csv(file_path, index_col="year")
sfo_data.head()

coord_path = Path("Data/neighborhoods_coordinates.csv")
coord_data = pd.read_csv(coord_path)
coord_data.head()
# -

# - - -

# ## Panel Visualizations
#
# In this section, you will copy the code for each plot type from your analysis notebook and place it into separate functions that Panel can use to create panes for the dashboard.
#
# These functions will convert the plot object to a Panel pane.
#
# Be sure to include any DataFrame transformation/manipulation code required along with the plotting code.
#
# Return a Panel pane object from each function that can be used to build the dashboard.
#
# Note: Remove any `.show()` lines from the code. We want to return the plots instead of showing them. The Panel dashboard will then display the plots.

# +
# Define Panel Visualization Functions
#
# The original notebook repeated the same groupby/mean boilerplate in every
# plotting function; it now lives once in the private helpers below.  The
# public function names, signatures and return values are unchanged.


def _yearly_neighborhood_means():
    """Mean sale price, housing units and gross rent per (year, neighborhood)."""
    out = pd.DataFrame(columns=['sale_price_sqr_foot', 'housing_units', 'gross_rent'])
    grouped = sfo_data.groupby(['year', 'neighborhood'])
    out['sale_price_sqr_foot'] = grouped['sale_price_sqr_foot'].mean()
    out['gross_rent'] = grouped['gross_rent'].mean()
    out['housing_units'] = grouped['housing_units'].mean()
    return out


def _neighborhood_means():
    """Per-neighborhood averages of the yearly means (index = neighborhood)."""
    yearly = _yearly_neighborhood_means()
    avg = pd.DataFrame()
    avg['sale_price_sqr_foot'] = yearly.groupby('neighborhood')['sale_price_sqr_foot'].mean()
    avg['housing_units'] = yearly.groupby('neighborhood')['housing_units'].mean()
    avg['gross_rent'] = yearly.groupby('neighborhood')['gross_rent'].mean()
    return avg


def _top_ten_by_rent():
    """Ten neighborhoods with the highest average gross rent, sorted by sale price."""
    top = _neighborhood_means().nlargest(10, 'gross_rent')
    top.sort_values(by='sale_price_sqr_foot', inplace=True)
    top.reset_index(inplace=True)
    return top


def housing_units_per_year():
    """Housing Units Per Year.  Returns a matplotlib Figure (bar chart)."""
    sfo_house_units = pd.DataFrame(sfo_data.groupby('year')['housing_units'].mean())
    tot_housing = sfo_data.groupby(['year'])['housing_units'].mean()

    fig, ax = plt.subplots()
    ax.bar(sfo_house_units.index, sfo_house_units['housing_units'])
    # Zoom the y axis to +/- one std around the data so the trend is visible.
    ax.set_ylim(tot_housing.min() - tot_housing.std(), tot_housing.max() + tot_housing.std())
    ax.set_title("Housing Units in San Francisco from 2010 to 2016")
    ax.set_xlabel("Year")
    ax.set_ylabel("Housing Units")  # typo fixed ("Houseing")
    return fig


def average_gross_rent():
    """Average Gross Rent in San Francisco Per Year.  Returns a matplotlib Figure."""
    rent_by_year = sfo_data.groupby('year')['gross_rent'].mean()

    fig2, ax2 = plt.subplots()
    ax2.plot(rent_by_year.index, rent_by_year.values, color='tab:orange')
    # Label fixed: this chart plots gross rent, not price per square foot.
    ax2.set_ylabel('Gross Rent', fontweight="bold", fontsize=12)
    ax2.set_xlabel('Year', fontweight="bold", fontsize=12)
    ax2.set_title('Average Gross Rent by Year', fontweight="bold", fontsize=12)
    return fig2


def average_sales_price():
    """Average Sales Price Per Year.  Returns a matplotlib Figure."""
    price_by_year = sfo_data.groupby('year')['sale_price_sqr_foot'].mean()

    fig1, ax1 = plt.subplots()
    ax1.plot(price_by_year.index, price_by_year.values, color='tab:blue')
    ax1.set_ylabel('Price per SqFt', fontweight="bold", fontsize=12)
    ax1.set_xlabel('Year', fontweight="bold", fontsize=12)
    ax1.set_title('Average Price per SqFt by Year', fontweight="bold", fontsize=12)
    return fig1


def average_price_by_neighborhood(Neighborhood):
    """Average Prices by Neighborhood.  Returns an hvplot line chart."""
    yearly = _yearly_neighborhood_means().reset_index()

    disp_df = pd.DataFrame()
    disp_df['years'] = yearly.loc[yearly['neighborhood'] == Neighborhood, 'year']
    disp_df['sale_price_sqr_foot'] = yearly.loc[yearly['neighborhood'] == Neighborhood, 'sale_price_sqr_foot']

    return disp_df.hvplot.line(
        x='years',
        y='sale_price_sqr_foot',
        title=(f'Neighborhood: {Neighborhood}'),
    )


def top_most_expensive_neighborhoods():
    """Top 10 Most Expensive Neighborhoods.  Returns an hvplot bar chart."""
    sf_topten_df = _top_ten_by_rent()
    return sf_topten_df.hvplot.bar(
        x='neighborhood',
        y='sale_price_sqr_foot',
        title=('Top 10 Most Expensive Neighborhoods - selection not working in tab'),
        rot=90,
    )


def most_expensive_neighborhoods_rent_sales(Neighbourhood):
    """Comparison of Rent and Sales Prices of Most Expensive Neighborhoods."""
    yearly = _yearly_neighborhood_means().reset_index()

    disp_df = pd.DataFrame()
    disp_df['years'] = yearly.loc[yearly['neighborhood'] == Neighbourhood, 'year']
    disp_df['gross_rent'] = yearly.loc[yearly['neighborhood'] == Neighbourhood, 'gross_rent']
    disp_df['sale_price_sqr_foot'] = yearly.loc[yearly['neighborhood'] == Neighbourhood, 'sale_price_sqr_foot']

    return (disp_df.hvplot.bar(
        x='years',
        y=['gross_rent', 'sale_price_sqr_foot'],
        value_label="Price",
        rot=90,
        title=(f'Top 10 Expensive Neighborhoods: {Neighbourhood}'))
    )


def parallel_coordinates():
    """Parallel Coordinates Plot.

    NOTE(review): despite the name, the original implementation draws a
    parallel *categories* plot including the neighborhood dimension; that
    behaviour is kept unchanged.
    """
    df_expensive_neighborhoods = _top_ten_by_rent()
    sfo_avg_df_reset = _neighborhood_means().reset_index()
    df_expensive_neighborhoods_per_year = sfo_avg_df_reset[
        sfo_avg_df_reset["neighborhood"].isin(df_expensive_neighborhoods["neighborhood"])]

    return px.parallel_categories(
        df_expensive_neighborhoods_per_year,
        dimensions=["neighborhood", "sale_price_sqr_foot", "housing_units", 'gross_rent'],
        color="sale_price_sqr_foot",
        color_continuous_scale=px.colors.sequential.Inferno,
        labels={
            "sale_price_sqr_foot": "sale_price_sqr_foot",
            "housing_units": "housing_units",  # garbled label fixed
            "gross_rent": "gross_rent",
            "neighborhood": "neighborhood"
        },
    )


def parallel_categories():
    """Parallel Categories Plot (numeric dimensions only)."""
    df_expensive_neighborhoods = _top_ten_by_rent()
    sfo_avg_df_reset = _neighborhood_means().reset_index()
    df_expensive_neighborhoods_per_year = sfo_avg_df_reset[
        sfo_avg_df_reset["neighborhood"].isin(df_expensive_neighborhoods["neighborhood"])]

    return px.parallel_categories(
        df_expensive_neighborhoods_per_year,
        dimensions=["sale_price_sqr_foot", "housing_units", 'gross_rent'],
        color="sale_price_sqr_foot",
        color_continuous_scale=px.colors.sequential.Inferno,
        labels={
            "sale_price_sqr_foot": "sale_price_sqr_foot",
            "housing_units": "housing_units",  # garbled label fixed
            "gross_rent": "gross_rent",
        },
    )


def neighborhood_map():
    """Neighborhood Map.  Returns a Plotly scatter-mapbox figure."""
    sfo_avg_df = _neighborhood_means()

    # Join coordinates with per-neighborhood averages (coord column vs. index).
    sf_joint_df = coord_data.join(sfo_avg_df, on='Neighborhood', how='inner')

    return px.scatter_mapbox(
        sf_joint_df,
        lat="Lat",
        lon="Lon",
        size="sale_price_sqr_foot",
        color="gross_rent",
        title="Average Sale Price Per Square Foot and Gross Rent in San Francisco - Controls not working in Tab",
        zoom=4)


def sunburst():
    """Sunburst Plot of the most expensive neighborhoods per year."""
    df_expensive_neighborhoods = _top_ten_by_rent()

    # Copy before reset_index to avoid mutating a view of sfo_data.
    starburst_data = sfo_data[sfo_data["neighborhood"].isin(df_expensive_neighborhoods["neighborhood"])].copy()
    starburst_data.reset_index(inplace=True)

    return px.sunburst(starburst_data,
                       path=['year', 'neighborhood'],
                       color='gross_rent',
                       color_continuous_scale="blues",
                       title="Cost Analysis of Most Expensive Neighborhoods in San Francisco per year",
                       width=750,
                       height=750
                       )
# -

# make a list of neighborhoods
neighborhoods = sfo_data['neighborhood'].unique()

# ## Panel Dashboard
#
# In this section, you will combine all of the plots into a single dashboard view using Panel. Be creative with your dashboard design!

# #### Why do charts show up

# +
# Create a Title for the Dashboard
# YOUR CODE HERE!
# Welcome tab: the interactive neighborhood map.
geo_column = pn.Column(
    "## Welcome Tab Neighborhood Map",
    neighborhood_map(),
)

# Yearly-market tab: the three per-year matplotlib charts side by side.
year_col = pn.Row(
    "## SFO Market Analysis",
    housing_units_per_year(),
    average_gross_rent(),
    average_sales_price(),
#    interact(average_price_by_neighborhood, Neighborhood=neighborhoods ),
)

# Neighborhood tab: top-ten bar chart plus an interactive comparison widget.
neigh_col = pn.Column(
    "## SFO Neighborhood Analysis",
    top_most_expensive_neighborhoods(),
    interact(most_expensive_neighborhoods_rent_sales, Neighbourhood=neighborhoods ),
)

# Parallel-plot tab.
parall_col = pn.Column(
    "## Parallel Analysis",
    parallel_coordinates(),
    parallel_categories(),
)

# Sunburst tab (typo'd "Starbust Analsys" title fixed).
starb_col = pn.Column(
    "## Sunburst Analysis",
    sunburst(),
)

# Assemble the dashboard: one tab per analysis view.
# (variable name typo fixed: sfo_dashbord -> sfo_dashboard)
sfo_dashboard = pn.Tabs(
    ("Welcome", geo_column),
    ("Yearly Market Analysis", year_col),
    ('Neighborhood Analysis', neigh_col),
    ('Parallel Plot Analysis', parall_col),
    ("Sunburst", starb_col),
)
# -

# ## Serve the Panel Dashboard

# ### Hassan's Note:
# #### Dashboard shows interactive behavior with 3 tabs.
# #### Dashboard does not show interactive behavior with 5 tabs that includes parallel plots and Sunburst.

# Serve the dashboard
sfo_dashboard.servable()

# # Debugging
#
# Note: Some of the Plotly express plots may not render in the notebook through the panel functions.
# # However, you can test each plot by uncommenting the following code # + # housing_units_per_year() # + # average_gross_rent() # + # average_sales_price(average_sales_price, Neighborhood=neighborhoods ) # + # interact(average_price_by_neighborhood, Neighborhood=neighborhoods ) # + # top_most_expensive_neighborhoods() # + # interact(most_expensive_neighborhoods_rent_sales, Neighbourhood=neighborhoods ) # - # neighborhood_map().show() # + # parallel_categories() # + # parallel_coordinates() # + # sunburst() # -
dashboard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import re
import collections

# KEGG BRITE hierarchy file (hsa00001) restricted to metabolism.  Each line is
# prefixed with a level letter: 'C' = pathway entry, 'D' = gene entry.
path = 'data/hsa00001.metab.keg.txt'

# ! head $path

# Build a pathway-name -> set-of-gene-symbols mapping.
p = ''  # name of the pathway currently being read
p2g = collections.defaultdict(set)
with open(path, 'r') as f:
    for l in f:
        s = l[1:].strip()
        if l[0] == 'C':
            # Pathway line: drop the trailing "[PATH:...]" tag and the leading
            # 5-digit pathway id, keeping only the pathway name.
            # Raw strings avoid the invalid-escape warning on '\d'; the
            # non-greedy '.+?' stops at the first ']' instead of the last one.
            s = re.sub(r'\[PATH:.+?\]', '', s)
            s = re.sub(r'\d{5}', '', s).strip()
            p = s
            # print(s)
        if l[0] == 'D':
            # Gene line: the symbol sits between the first space and the first
            # ';', e.g. "3098  HK1; hexokinase 1 [...]".  Non-greedy so a
            # semicolon later in the description is not swallowed; strip() in
            # the add() removes the padding spaces between the numeric id and
            # the symbol, which the original greedy capture kept.
            g = re.search(r' (.+?);', s)
            if g:
                p2g[p].add(g[1].strip())
            else:
                # Gene lines without the expected "SYMBOL;" pattern are only
                # reported, not recorded.
                print(s)

print(p2g)

# Write one GMT line per pathway: pathway name followed by its tab-separated
# gene symbols.
with open("data/hsa00001.metab.keg.gmt", "w") as f:
    for p in p2g:
        g = p2g[p]
        f.write("{}\n".format("\t".join([p] + list(g))))

# ! grep SI $path
parse_hsa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # F / OH site preference in (F/OH)-substituted anatase TiO<sub>2</sub>. # <NAME> # # This notebook compares the relative energies for different anion configurations in (F/OH)-substituted TiO<sub>2</sub>. # # [Corradini *et al.*](https://dx.doi.org/10.1038/srep11553) and [Li *et al.*](https://dx.doi.org/10.1021/acs.chemmater.5b01407) have previously reported DFT calculations that predict F<sub>O</sub> defects in F-substituted anatase TiO<sub>2</sub> preferentially occupy equatorial sites neighbouring a charge compensating *V*<sub>Ti</sub> vacancy. # # In this notebook, we replicate this result using new DFT data, and then show that OH<sub>O</sub> similarly prefer to sit adjacent to the *V*<sub>Ti</sub> vacancy. For the [*V*<sub>Ti</sub> + 4OH<sub>O</sub>] defect complex, we find that a mix of equatorial and axial OH groups is slightly favourable relative to exclusive equatorial OH groups. # + import numpy as np from vasppy.calculation import * import matplotlib.pyplot as plt import version_information # %matplotlib inline # %config InlineBackend.figure_format = 'retina' # + # import figure_formatting from figure_formatting import rcParams from figure_formatting import master_formatting, nearly_black, tableau10 rcParams.update( master_formatting ) # - # ## Where do F<sub>O</sub> and OH<sub>O</sub> prefer to sit in relation to V<sub>Ti</sub>? # # Start by reading in the energies, stoichiometries, and titles for all calculations summarised in `vaspdata.yaml`. # + calcs = import_calculations_from_file( '../data/anion_location_test_data.yaml' ) # Check which calculation data we have read for c in calcs: print( c ) # - # Each calculation is a 4×4×2 anatase TiO<sub>2</sub> supercell with 4 substituted anions, and a charge compensating Ti vacancy. 
# There are three possible positions for each anion: # - adjacent to the Ti vacancy, in an equatorial position (denoted `_eq`), # - adjacent to the Ti vacancy, in an axial position (denoted `_ax`), # - separated from the Ti vacancy (denoted `_sep`). # ## Comparing pure F<sub>O</sub> configurations # + # %%capture --no-stdout --no-display error # Note: here we consider only equatorial vs. separated F_O. plt.figure( figsize=(5,4)) energies = np.array([]) for i in range(5): title = 'Anatase TiO2 4x4x2 V_Ti + {}F_eq + {}F_sep'.format( i, 4-i ) c = calcs[ title ] energies = np.hstack( [ energies, c.energy ] ) plt.plot( range(5), energies-energies[0], 'o' ) plt.xticks([0,1,2,3,4]) plt.xlabel(r'number of F in equatorial $V_\mathrm{Ti}$-adjacent sites') plt.ylabel('relative energy [eV]') plt.savefig( '../figures/F_O_configuration_energies.pdf' ) # - # F prefers to sit in an equatorial position adjacent to the *V*<sub>Ti</sub> site, rather than be separated, by approximately 1 eV per F. # # This replicates the trend reported in [Corradini et al., Sci. Rep. 5, 11553 (2015)](https://dx.doi.org/10.1038/srep11553), and in [Li et al. Chem. Mater. 27, 5014 (2015)](https://dx.doi.org/10.1021/acs.chemmater.5b01407). # ## Is this trend also seen for OH<sub>O</sub>? # + # %%capture --no-stdout --no-display error # Note: here we consider only equatorial vs. separated OH_O. plt.figure( figsize=(5,4)) energies = np.array([]) for i in range(5): title = 'Anatase TiO2 4x4x2 V_Ti + {}OH_eq + {}OH_sep'.format( i, 4-i ) c = calcs[ title ] energies = np.hstack( [ energies, c.energy ] ) plt.plot( range(5), energies-energies[0], 'o' ) plt.xticks([0,1,2,3,4]) plt.xlabel(r'number of OH in equatorial $V_\mathrm{Ti}$-adjacent sites') plt.ylabel('relative energy [eV]') plt.savefig( '../figures/OH_O_configuration_energies.pdf' ) # - # ### Is there any preference for axial versus equatorial F/OH around the Ti vacancy? # Any preference for axial vs equatorial F anions? 
for c in [ '2F_eq + 2F_ax', '3F_eq + 1F_ax', '3F_eq + 1F_ax alternate', '4F_eq + 0F_sep' ]: title = 'Anatase TiO2 4x4x2 V_Ti + {}'.format( c ) print( c, calcs[title].energy ) # There is very little difference in energy for these different arrangements of fluoride ions. The configuration with all fluoride ions equatorial is most stable (by ~48 meV). # Any preference for axial vs equatorial OH anions? for c in [ '2OH_eq + 2OH_ax', '4OH_eq + 0OH_sep' ]: title = 'Anatase TiO2 4x4x2 V_Ti + {}'.format( c ) print( c, calcs[title].energy ) # For the OH anions, having two OH axial and two OH equatorial is slightly more energetically favourable than having all four OH equatorial. A possible explanation for this difference versus the fluoride ions is that this mixed 2OH<sub>eq</sub> + 2OH<sub>ax</sub> arrangement allows four hydrogen bonds with O<sup>2-</sup> ions. # # <img src='figures/V_Ti+2OH_eq+2OH_ax.png' style='width: 280px;' /> # # %load_ext version_information # %version_information numpy, vasppy, matplotlib, version_information
analysis/defect_configurations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # %load template.py # # %load template.py import glob import io import ipyleaflet import IPython.display import ipyvolume.pylab as p3 import json import matplotlib.cm import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pdal import PIL import pyproj import requests import fiona import shapely.geometry from shapely.geometry import Polygon, mapping import scipy.spatial import sys import urllib.request # %load_ext autoreload # %autoreload 2 sys.path.append('../src') from pcl_utils import local_max # Url for aerial imagery # IVaerial = "https://geoservices.informatievlaanderen.be/raadpleegdiensten/ogw/wms?SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&CRS=EPSG:31370&BBOX={0},{1},{2},{3}&WIDTH=512&HEIGHT=512&LAYERS=OGWRGB13_15VL&STYLES=default&FORMAT=image/png" # %matplotlib inline # - # look at tunnel site # from bigLaz- crop, bigLaz = "/media/thistle/Passport/gis/Idaho_Springs/lidar_downloads/outputs/bigLaz.laz" # get tunnel_poly polys = {} sites_fp = "/home/thistle/Documents/dev/ML/idaho1/data/gis/sites_of_interest/sites.shp" with fiona.open(sites_fp, "r") as f: for each in f: polys[each['properties']['name']] = each['geometry']['coordinates'][0] polys # looking at tunnel site tunnel = Polygon(polys['tunnel']) tunnel.to_wkt() bigLaz # + output_las = '/media/thistle/Passport/gis/Idaho_Springs/lidar_downloads/outputs/tunnel1.las' pipe1 = {"pipeline":[ { "type": "readers.las", "filename": bigLaz }, { "type": "filters.crop", "polygon": tunnel.to_wkt() }, { "type": "filters.hag" }, { "type": "filters.eigenvalues", "knn": 16 }, { "type": "filters.normal", "knn": 16 }, { "type": "writers.las", "filename": output_las } ]} # - pp = pdal.Pipeline(json.dumps(pipe1)) pp.validate() # %%time numpts = pp.execute() 
print(f"{numpts:,} processed") # + # # !dir(pp) # - arr = pp.arrays[0] description = arr.dtype.descr cols = [each for each, _ in description] print(cols) df = pd.DataFrame({col:arr[col] for col in cols}) df.shape # df.describe() # df.info() df.head() df['X_0'] = df['X'] df['Y_0'] = df['Y'] df['Z_0'] = df['Z'] df['X'] = df['X'] - df['X_0'].min() df['Y'] = df['Y'] - df['Y_0'].min() df['Z'] = df['Z'] - df['Z_0'].min() df['X_0'].min(), df['X'].min() df['X_0'].max(), df['X'].max() # + fig = p3.figure(width=1000) fig.xlabel='Y' fig.ylabel='Z' fig.zlabel='X' all_points = p3.scatter(df['Y'], df['Z'], df['X'], color='red', size=.2) p3.squarelim() p3.show() # you can see the powerlines well # - # Color ground in grey df['ground'] = df['Classification']==2 ground = p3.scatter(df.loc[df['ground'],'Y'].values, df.loc[df['ground'],'Z'].values, df.loc[df['ground'],'X'].values, color='red', size=.2) non_ground = p3.scatter(df.loc[~df['ground'],'Y'].values, df.loc[~df['ground'],'Z'].values, df.loc[~df['ground'],'X'].values, color='red', size=.2) fig.scatters.append(ground) fig.scatters.append(non_ground) all_points.visible = False ground.color='lightgrey' non_ground.visible = True # False df['Classification'].value_counts() ground_delaunay = scipy.spatial.Delaunay(df.loc[df['ground'],['X','Y']]) ground_surf = p3.plot_trisurf(df.loc[df['ground'],'Y'], df.loc[df['ground'],'Z'], df.loc[df['ground'],'X'], ground_delaunay.simplices, color='lightgrey') fig.meshes.append(ground_surf) # ground_surf.color = "salmon" ground.visible=False non_ground.visible=False # Color points according to flatness df['flatness'] = df['Eigenvalue0'] non_ground.color=matplotlib.cm.viridis(df.loc[~df['ground'],'flatness']*4)[:,0:3] # Separate between trees and the rest df['tree_potential'] = (df['Classification']==1) & (df['HeightAboveGround'] >= 2) & (df['flatness'] > .05) & (df['NumberOfReturns'] - df['ReturnNumber'] >= 1) df['other'] = ~df['ground'] & ~df['tree_potential'] tree_potential = 
p3.scatter(df.loc[df['tree_potential'],'Y'].values, df.loc[df['tree_potential'],'Z'].values, df.loc[df['tree_potential'],'X'].values, color=matplotlib.cm.viridis(df.loc[df['tree_potential'],'flatness']*4)[:,0:3], size=.2) other = p3.scatter(df.loc[df['other'],'Y'].values, df.loc[df['other'],'Z'].values, df.loc[df['other'],'X'].values, color=matplotlib.cm.viridis(df.loc[df['other'],'flatness']*4)[:,0:3], size=.2) non_ground.visible=False tree_potential.color='darkgreen' other.color='red' other.visible=False # + lep = local_max(df.loc[df['tree_potential'],['X','Y','Z','HeightAboveGround']], radius=3, density_threshold=5) treetop_spheres = p3.scatter(lep['Y'].values, lep['Z'].values, lep['X'].values, color='red', size=.5, marker='sphere') fig.scatters.append(treetop_spheres) # - treetop_spheres.color = matplotlib.cm.tab20(np.arange(len(lep['Z']))%10)[:,0:3] kdtree = scipy.spatial.kdtree.KDTree(lep[['X','Y','Z']]) dist, idx = kdtree.query(df.loc[df['tree_potential'],['X','Y','Z']].values) tree_potential.color=matplotlib.cm.tab20(idx%10)[:,0:3] df.loc[df['tree_potential'], 'tree_idx'] = idx medians = df.groupby('tree_idx')[['X','Y','Z']].median() for axis in ['X','Y','Z']: df['d'+axis] = df[axis] - df['tree_idx'].map(medians[axis]) df['radius'] = np.linalg.norm(df[['dX', 'dY', 'dZ']].values, axis=1) radii = pd.DataFrame([df.groupby('tree_idx')['radius'].quantile(.5), lep['HeightAboveGround'].values*.4]).min() scale = max(df['X'].max() - df['X'].min(), df['Y'].max() - df['Y'].min()) treetop_spheres.x = medians['Y'] treetop_spheres.y = medians['Z'] treetop_spheres.z = medians['X'] treetop_spheres.size = radii * 100 / scale # + # the power line displays quite distinctly # - tree_potential.visible = False other.visible = False other.visible = True treetop_spheres.color='darkgreen' p3.style.use('minimal') # + # Continue working to find tallest trees not including powerlines
notebooks/Tall_Trees_at_project_site.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output. # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline from sklearn.model_selection import train_test_split, GridSearchCV from IPython.display import display import seaborn as sns from sklearn.metrics import accuracy_score,f1_score,recall_score,precision_score,classification_report from sklearn.metrics import confusion_matrix df = pd.read_csv("../input/kc_house_data.csv") X = df[['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_basement', 'yr_built', 'zipcode', 'sqft_living15', 'sqft_lot15']] y = df['price'] d = pd.get_dummies(df['zipcode'].astype('str')) X = pd.concat([X,d],axis=1) X = X.drop('zipcode',axis=1) # + _uuid="03318a8cec1f95b882846299e4f4ae72b6ae974d" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234) X_train,X_val,y_train,y_val = train_test_split(X_train, y_train, test_size=0.3, 
random_state=1234) from sklearn import preprocessing scalerX = preprocessing.StandardScaler() X_train = scalerX.fit_transform(X_train) scalerY = preprocessing.StandardScaler() y_train = scalerY.fit_transform(np.log(y_train).reshape(-1,1)) X_val = scalerX.transform(X_val) y_val = scalerY.transform(np.log(y_val).reshape(-1,1)) X_test = scalerX.transform(X_test) y_test = scalerY.transform(np.log(y_test).reshape(-1,1)) import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Activation from keras.optimizers import SGD,RMSprop, Adagrad, Adadelta, Adam input_dim = len(X.columns) model = Sequential() model.add(Dense(128, activation='relu', input_dim=input_dim)) model.add(Dense(64, activation='relu', input_dim=128)) model.add(Dense(32, activation='relu', input_dim=64)) model.add(Dense(1)) sgd = SGD(lr=5e-3,nesterov=False) #rms = RMSprop(lr=0.01) #adag = Adagrad(lr=0.01) #adad = Adadelta(lr=0.01) #adam = Adam(lr=0.01) model.compile(loss='mean_absolute_error', optimizer=sgd, metrics=['mean_absolute_error']) # + _uuid="76fd513defa3f1a1caca7ca6961c825bd78f6ae9" fit = model.fit(X_train, y_train, epochs=500, batch_size=512,validation_data=(X_val, y_val),verbose=0) df = pd.DataFrame(fit.history) df[["loss", "val_loss"]].plot() plt.ylabel("loss") plt.xlabel("epoch") plt.show() # + _uuid="35becf82cbe3ae662f6c002b4b93d62762a0d535" y_true = np.exp(scalerY.inverse_transform(y_test)).reshape(-1) y_pred = np.exp(scalerY.inverse_transform(fit.model.predict(X_test))).reshape(-1) np.abs(y_pred-y_true).mean() # + _uuid="1e2b074a3222cc9f589302eab5287cba327f0be9"
downloaded_kernels/house_sales/kernel_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <b>Calcule a integral dada</b>

# $4. \int e^{5x}dx$

# $u = 5x$

# $du = 5dx$

# $\frac{du}{5} = dx$

# <b>Substituindo $5x$ por $u$</b>

# $\int \frac{e^u}{5}du$

# <b>Integrando $\int \frac{e^u}{5}du$</b>

# $\int e^u du = e^u + C$

# $\frac{1}{5}\int e^u du = \frac{1}{5} e^u + C$

# <b>Substituindo $u$ por $5x$</b>

# $\int e^{5x}dx = \frac{1}{5} e^{5x} + C$
Problemas 5.2/04.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 ('obj_recon')
#     language: python
#     name: python3
# ---

from pathlib import Path
import json

# Root of the CO3D dataset and the category whose eval batches we rebuild.
dataset_root = Path('/raid/yuang/Data/CO3D_ALL')
category = 'hydrant'

# ## check original eval_singleseq files

def load_eval_singleseq(root, category, suffix=''):
    """Load a category's single-sequence eval batches.

    Each batch is a list of frames; each frame is a (sequence_id,
    frame_number, ...) tuple.  `suffix` selects a variant file, e.g.
    '.original' for the backup written by `save_new_singleseq`.
    """
    path = root / category / ('eval_batches_singlesequence.json' + suffix)
    with open(path, 'r') as f:
        eval_singleseq = json.load(f)
    return eval_singleseq

car_eval_singleseq = load_eval_singleseq(dataset_root, 'car', suffix='.original')
car_seq_ids = set(seq[0][0] for seq in car_eval_singleseq)
car_seq_ids

car_eval_singleseq[0]

# Sanity check: within the loaded batches, each (sequence, frame) pair must
# appear at most once.
eval_singleseq = load_eval_singleseq(dataset_root, category)
seq_ids = set(seq[0][0] for seq in eval_singleseq)
seq_frames = {s: set() for s in seq_ids}
for seq in eval_singleseq:
    for frame in seq:
        seq_id = frame[0]
        assert frame[1] not in seq_frames[seq_id]
        seq_frames[seq_id].add(frame[1])
seq_frames

# ## create eval_singleseq files

category = 'hydrant'
target_seq_id = '106_12660_22718'

# check if target id in multiseq
def parse_from_multiseq(root, cat, target_id):
    """Return the multi-sequence eval batches whose first frame belongs to
    `target_id`, sorted longest batch first."""
    path = root / cat / 'eval_batches_multisequence.json'
    with open(path, 'r') as f:
        seqs = json.load(f)
    target_seqs = [seq for seq in seqs if seq[0][0] == target_id]
    # sort descendingly (by batch length)
    target_seqs = sorted(target_seqs, key=lambda x: len(x), reverse=True)
    return target_seqs

def parse_from_setlist(root, cat, target_id):
    """Return which split of `cat`'s set_lists.json contains sequence
    `target_id` (one of train_known / train_unseen / test_known /
    test_unseen), or None if it appears in no split.
    """
    # Fixed: use the `root`/`cat` parameters instead of the module-level
    # `dataset_root`/`category` globals the original accidentally captured,
    # which made the arguments silently ignored.
    set_list_path = root / cat / 'set_lists.json'
    with open(set_list_path, 'r') as f:
        set_list = json.load(f)
    splits = ['train_known', 'train_unseen', 'test_known', 'test_unseen']
    split_dict = {s: set() for s in splits}
    assert set(set_list.keys()) == set(splits)
    for split in splits:
        for entry in set_list[split]:
            # entry[0] is the sequence id of the listed frame
            split_dict[split].add(entry[0])
    target_split = None
    for split, ids in split_dict.items():
        if target_id in ids:
            target_split = split
            break
    return target_split

parse_from_setlist(dataset_root, category, target_seq_id)

# NOTE(review): `set_list` is local to parse_from_setlist, so this debug cell
# raised NameError when the notebook was run top-to-bottom; disabled.
# set_list['test_known'][100:110]

target_ss_seqs = parse_from_multiseq(dataset_root, category, target_seq_id)
[[f[1] for f in seq] for seq in target_ss_seqs]

# save singleseq
def save_new_singleseq(root, cat, new_singleseq):
    """Write `new_singleseq` as the category's single-sequence eval batches.

    Any existing eval_batches_singlesequence.json is first renamed to
    '<name>.original' as a backup (an earlier backup is overwritten on POSIX,
    per Path.rename semantics).  Returns the path written.
    """
    path = root / cat / 'eval_batches_singlesequence.json'
    if path.exists():
        backup_path = root / cat / 'eval_batches_singlesequence.json.original'
        path.rename(backup_path)
    with open(path, 'w') as f:
        json.dump(new_singleseq, f)
    return path

new_singleseq_path = save_new_singleseq(dataset_root, category, target_ss_seqs)
new_singleseq_path
projects/implicitron_trainer/scripts/create_singleseq/create_singleseq.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# # Data Quality Reports

# + [markdown] deletable=true editable=true
# ### Load data and extract categorical and continuous column headers

# + deletable=true editable=true
import pandas as pd

df = pd.read_csv('data/training_data.csv', index_col=0)
# NOTE(review): the first assignment is immediately overwritten and is kept
# only as a record of a previously used feature split.
categorical_features = ['at_home', 'winning_at_half_time','won_match']
categorical_features = ['full_time_result', 'half_time_result']
continuous_features = [col for col in df.columns if col not in categorical_features]

# + [markdown] deletable=true editable=true
# ### Continuous features

# + deletable=true editable=true
cont_df = df[continuous_features]
cont_df

# + [markdown] deletable=true editable=true
# ### Categorical features

# + deletable=true editable=true
cat_df = df[categorical_features]
cat_df

# + [markdown] deletable=true editable=true
# ### Function for building the continuous data quality report

# + deletable=true editable=true
import collections

def continuous_data_quality_report(df):
    """Build a data quality report for continuous features.

    Returns a DataFrame with one row per column of `df`: count, % missing
    (NaN), cardinality, min, 1st/3rd quartiles, mean, median, max and
    standard deviation.
    """
    data_cols = df.columns
    counts = []
    perc_missing = []
    cardinality = []
    mins = []
    first_quartiles = []
    means = []
    medians = []
    third_quartiles = []
    maxs = []
    std_devs = []
    for col in data_cols:
        counts.append(len(df[col]))
        # counts[-1] is this column's row count, appended just above.
        perc_missing.append(df[col].isnull().sum() / counts[-1] * 100)
        cardinality.append(len(df[col].unique()))
        mins.append(df[col].min(skipna=True))
        first_quartiles.append(df[col].quantile(0.25))
        means.append(df[col].mean())
        medians.append(df[col].median())
        third_quartiles.append(df[col].quantile(0.75))
        maxs.append(df[col].max())
        std_devs.append(df[col].std())
    # OrderedDict preserves the report's column order.
    data = collections.OrderedDict((
        ('Feature', data_cols),
        ('Count', counts),
        ('% Missing', perc_missing),
        ('Cardinality', cardinality),
        ('Min', mins),
        ('1st Quartile', first_quartiles),
        ('Mean', means),
        ('Median', medians),
        ('3rd Quartile', third_quartiles),
        ('Max', maxs),
        ('Std Dev', std_devs),
    ))
    return pd.DataFrame(data)

# + [markdown] deletable=true editable=true
# ### Function for building categorical data quality report

# + deletable=true editable=true
import collections

def categorical_data_quality_report(df):
    """Build a data quality report for categorical features.

    Returns a DataFrame reporting, per column of `df`: count, % missing,
    cardinality, and the two most frequent values with their counts and
    percentages ('N/A' for the second mode of a constant column).
    """
    data_cols = df.columns
    counts = []
    perc_missing = []
    cardinality = []
    modes = []
    mode_counts = []
    mode_percs = []
    second_modes = []
    second_mode_counts = []
    second_mode_percs = []
    for col in data_cols:
        counts.append(len(df[col]))
        # Fixed: read from the `df` parameter here; the original read the
        # global `cat_df`, which silently broke the report for any other
        # input frame.  Missing categorical values are assumed to be stored
        # as '' — TODO confirm against the raw CSV.
        perc_missing.append(list(df[col].values).count('') / counts[-1] * 100)
        cardinality.append(len(df[col].unique()))
        # Compute value_counts once per column instead of up to five times.
        vc = df[col].value_counts()
        modes.append(vc.index[0])
        mode_counts.append(vc.iloc[0])
        mode_percs.append(mode_counts[-1] / counts[-1] * 100)
        if cardinality[-1] > 1:
            second_modes.append(vc.index[1])
            second_mode_counts.append(vc.iloc[1])
            second_mode_percs.append(second_mode_counts[-1] / counts[-1] * 100)
        else:
            # A constant column has no second mode.
            second_modes.append('N/A')
            second_mode_counts.append('N/A')
            second_mode_percs.append('N/A')
    data = collections.OrderedDict((
        ('Feature', data_cols),
        ('Count', counts),
        ('% Missing', perc_missing),
        ('Cardinality', cardinality),
        ('Mode', modes),
        ('Mode Count', mode_counts),
        ('Mode %', mode_percs),
        ('2nd Mode', second_modes),
        ('2nd Mode Count', second_mode_counts),
        ('2nd Mode %', second_mode_percs),
    ))
    return pd.DataFrame(data)

# + deletable=true editable=true
continuous_data_quality_report(cont_df)

# + deletable=true editable=true
categorical_data_quality_report(cat_df)
DataQualityReports.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!pip install pyspark # - from pyspark.sql import SparkSession from pyspark.ml import Pipeline from pyspark.sql.functions import mean,col,split, col, regexp_extract, when, lit, avg from pyspark.ml.feature import StringIndexer from pyspark.ml.feature import VectorAssembler from pyspark.ml.evaluation import MulticlassClassificationEvaluator from pyspark.ml.feature import QuantileDiscretizer spark = SparkSession.builder.appName("PySparkTitanikJob").getOrCreate() spark titanic_df = spark.read.csv('train.csv', header = 'True', inferSchema='True') display(titanic_df) titanic_df.printSchema() passengers_count = titanic_df.count() titanic_df.show(5) gropuBy_output = titanic_df.groupBy("Survived").count() gropuBy_output.show() avg_age = round(titanic_df.select(avg(col('Age'))).collect()[0][0],0) avg_age titanic_df = titanic_df.fillna({'Age': avg_age}) titanic_df.show() titanic_df = titanic_df.fillna({"Embarked" : 'S'}) titanic_df = titanic_df.drop("Cabin") titanic_df = titanic_df.withColumn("Family_Size",col('SibSp')+col('Parch')) titanic_df = titanic_df.withColumn('Alone',lit(0)) titanic_df = titanic_df.withColumn("Alone",when(titanic_df["Family_Size"] == 0, 1).otherwise(titanic_df["Alone"])) data_df = titanic_df titanic_df.show(2) indexers = [StringIndexer(inputCol=column, outputCol=column+"_index").fit(titanic_df) for column in ["Sex","Embarked"]] pipeline = Pipeline(stages=indexers) titanic_df = pipeline.fit(titanic_df).transform(titanic_df) titanic_df = titanic_df.drop("PassengerId","Name","Ticket","Cabin","Embarked","Sex") titanic_df.show() feature = VectorAssembler(inputCols=titanic_df.columns[1:],outputCol="features") feature_vector= feature.transform(titanic_df) feature_vector.show() (training_data, test_data) = 
feature_vector.randomSplit([0.8, 0.2],seed = 42) training_data.show() # # ML models # # LogisticRegression from pyspark.ml.classification import LogisticRegression lr = LogisticRegression(labelCol="Survived", featuresCol="features") #Training algo lrModel = lr.fit(training_data) lr_prediction = lrModel.transform(test_data) lr_prediction.select("prediction", "Survived", "features").show(5) evaluator = MulticlassClassificationEvaluator(labelCol="Survived", predictionCol="prediction", metricName="accuracy") lr_accuracy = evaluator.evaluate(lr_prediction) print("LogisticRegression [Accuracy] = %g"% (lr_accuracy)) print("LogisticRegression [Error] = %g " % (1.0 - lr_accuracy)) # # DecisionTreeClassifier # + from pyspark.ml.classification import DecisionTreeClassifier dt = DecisionTreeClassifier(labelCol="Survived", featuresCol="features") dt_model = dt.fit(training_data) dt_prediction = dt_model.transform(test_data) dt_prediction.select("prediction", "Survived", "features").show(5) # - dt_accuracy = evaluator.evaluate(dt_prediction) print("DecisionTreeClassifier [Accuracy] = %g"% (dt_accuracy)) print("DecisionTreeClassifier [Error] = %g " % (1.0 - dt_accuracy)) # # RandomForestClassifier from pyspark.ml.classification import RandomForestClassifier rf = RandomForestClassifier(labelCol="Survived", featuresCol="features") rf_model = rf.fit(training_data) rf_prediction = rf_model.transform(test_data) rf_prediction.select("prediction", "Survived", "features").show(5) rf_accuracy = evaluator.evaluate(rf_prediction) print("RandomForestClassifier [Accuracy] = %g"% (rf_accuracy)) print("RandomForestClassifier [Error] = %g" % (1.0 - rf_accuracy)) # # Gradient-boosted tree classifier from pyspark.ml.classification import GBTClassifier gbt = GBTClassifier(labelCol="Survived", featuresCol="features",maxIter=10) gbt_model = gbt.fit(training_data) gbt_prediction = gbt_model.transform(test_data) gbt_prediction.select("prediction", "Survived", "features").show(5) gbt_accuracy = 
evaluator.evaluate(gbt_prediction) print("Gradient-boosted [Accuracy] = %g"% (gbt_accuracy)) print("Gradient-boosted [Error] = %g"% (1.0 - gbt_accuracy)) # # Save & Load Model rf_model.save('rf_model') from pyspark.ml.classification import RandomForestClassificationModel type(RandomForestClassificationModel.load('rf_model')) # # Pipeline from pyspark.ml.pipeline import PipelineModel data_df = data_df.drop("PassengerId","Name","Ticket","Cabin") train, validate = data_df.randomSplit([0.8, 0.2]) train.show(5) indexer_sex = StringIndexer(inputCol="Sex", outputCol="Sex_index") indexer_embarked = StringIndexer(inputCol="Embarked", outputCol="Embarked_index") feature = VectorAssembler( inputCols=["Pclass","Age","SibSp","Parch","Fare","Family_Size","Embarked_index","Sex_index"], outputCol="features") rf_classifier = RandomForestClassifier(labelCol="Survived", featuresCol="features") pipeline = Pipeline(stages=[indexer_sex, indexer_embarked, feature, rf_classifier]) p_model = pipeline.fit(train) type(p_model) p_model.write().overwrite().save('p_model') model = PipelineModel.load('p_model') prediction = p_model.transform(validate) validate.show(5) prediction.select(["Pclass","Age","SibSp","Parch","Fare","Family_Size","Embarked_index","Sex_index"]).show(5) prediction.printSchema() evaluator = MulticlassClassificationEvaluator(labelCol="Survived", predictionCol="prediction", metricName="accuracy") p_accuracy = evaluator.evaluate(prediction) print("Pipeline model [Accuracy] = %g"% (p_accuracy)) print("Pipeline model [Error] = %g " % (1.0 - p_accuracy))
Week5_SparkML/SparkML/spark-practice/.ipynb_checkpoints/TitanikSparkML-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 2D animations #import usual things import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # use a function from hermite library to read in data from hermite_library import read_hermite_solution_from_file # I'll use this function to read in previously stored data # order is: # time, energy, position, velocity t_h, E_h, r_h, v_h = read_hermite_solution_from_file('myPlanetSystem_kepler101_solution1.txt') t_h # array in seconds E_h # energy as a function of time, normalized units # + r_h # position vector, [number of particles, coordinates (0,1,2)=(x,y,z), time step] # first particle, y coord, timestep 6 r_h[0, 1, 5] r_h.shape # + # let's make a quick figure # first create a figure & axes object to store a 1x2 plot fig, ax = plt.subplots(1, 2, figsize=(10*2, 10)) # loop over all particles and plot their x/y coordinates at all times for i in range(r_h.shape[0]): # loop over all particles ax[0].plot(r_h[i, 0, :], r_h[i, 1, :]) # above is # plot(r_h[ith particle, x axis, all timesteps], r_h[ith particle, y axis, all timesteps]) # label the first axis ax[0].set_xlabel('x in AU', fontsize=20) # set label on horizontal axis ax[0].set_ylabel('y in AU', fontsize=20) # label on vertical axis #ax[0].set_ticksize(15) # also plot energy as a function of time ax[1].plot(t_h, E_h) # plotting time vs energy plt.show() # show the plot! 
# - # # Lets (finally) make an animation # !pip install JSAnimation from JSAnimation.IPython_display import display_animation # actually plot a movie in notebook from matplotlib import animation # use a stepsize for subsampling data to save on computation stepSize = 500 # + # actually subsample our dataset # r[number of particles, number of coordinates, number of times] r = r_h[:,:,0:-1:stepSize] # t and E *no* coordinates, just timesteps t = t_h[0:-1:stepSize] E = E_h[0:-1:stepSize] t.shape, E_h.shape # + # we'll plot this subsampled dataset again fig, ax = plt.subplots(1,2,figsize=(10*2,10)) # generating figure and axes for a 1x2 plot for i in range(r.shape[0]): # loop over number of particles in subsampled position ax[0].plot(r[i,0,:],r[i,1,:]) # plotting for each particle x vs y for all timesteps in subsampled position # plotting subsampled energy on 2nd plot ax[1].plot(t,E) plt.show() # - # first, make a figure on which to plot stuff fig, ax = plt.subplots(1,2, figsize=(10*2,10)) # 1x2 figure # import an animation function to take our positions, t, E and make animation objects from animations_library import plot_animations # use this function to generate a few objects needed for the matplotlib animation function init, animate, nFrames = plot_animations(fig, ax, t, E, r) # + #r # + # use the above to make an animation & display it anim = animation.FuncAnimation(fig, animate, init_func=init, frames = nFrames, interval=20, blit=True) # plot on the figure object, with a function that animates # with our initialization function, over nFrames numbers of frames # use an interval between frames of 20 millisec, and don't redraw everything # finally, display this animation #display_animation(anim) anim # - # ## saving animations anim.save('myAnimation.gif', writer='imagemagick') # + # for mp4 formatting # if error along the lines of "no ffmpeg" # #!conda install -c conda-forge ffmpeg --yes Writer = animation.writers['ffmpeg'] # save with an movie writing software called 
ffmpeg writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800) # formating for ffmpeg # - anim.save('anim.mp4', writer=writer) # ## Exercises # Redo this generation of animations with your dataset. # # Other stuff: try modifying things in the `animations_library.py` # * change the colors of things being plotted (hint: google colors for plots in python) # * use different plotting symbols (hint: google marker styles for python) # * plot y/z or x/z # * adding in more plots than just 1 coordinate plot (1x4) # * check out what is available for a 3D projection plot (hint: google projection 3d, 3d plots, in python) # # Interactive/Data/Info Viz import pandas as pd planets = pd.read_csv('planets_2019.07.12_17.16.25.csv', sep=',', comment='#') planets planets.loc[0:3] # first 4 entries of dataset # columns of dataset planets.columns # how many unique stars are here (multiplanet systems) planets['pl_hostname'].unique() planets['pl_hostname'].nunique() # print out stats about my data easily planets.describe() # make a quick plot planets['pl_orbeccen'].plot() # make a histogram using pandas myPlot = planets['pl_orbeccen'].hist() myPlot.set_xlabel('Eccentricity') # setting my label on my x-axis myPlot.set_ylabel('Number of planets with given Eccentricy') # y-axis # + # doing the same plot another way with our maplotlib framework fig, ax = plt.subplots(1,1, figsize=(5,5)) planets['pl_orbeccen'].hist(ax=ax) ax.set_xlabel('Eccentricity') ax.set_ylabel('Number of planets with given eccentricity') plt.show() # - # trying a different plotting style with plt.style.context('ggplot'): # making sure just the plot under here is with 'ggplot' style fig, ax = plt.subplots(1,1, figsize=(5,5)) # fig, ax object planets['pl_orbeccen'].hist(ax=ax) # histogram on these axes ax.set_xlabel('Ecc') # horizontal axes def ax.set_ylabel('# of planets at ecc') # vertical axes def plt.show() plt.style.available # we'll use the widgets library to do this interactively import ipywidgets # use 
something funny called a decorator function, this "decorates" whatever comes right # after it @ipywidgets.interact(style=plt.style.available) # this will make the style interactive def make_plot(style): # function to make a plot with a given style with plt.style.context(style): # do plot with variable styles fig, ax = plt.subplots(1,1,figsize=(5,5)) # same fig, ax as before planets['pl_orbeccen'].hist(ax=ax) # histogram on axes # label x & y ax.set_xlabel('ecc') ax.set_ylabel('# of planets with ecc') plt.show() # let's also change the number of bins *and* the plot style @ipywidgets.interact(style=plt.style.available, number_of_bins=range(1,20,1)) def make_plot(style, number_of_bins): with plt.style.context(style): fig, ax = plt.subplots(1,1,figsize=(5,5)) # makeing fig, ax planets['pl_orbeccen'].hist(ax=ax, bins=number_of_bins) # set x&y labels ax.set_xlabel('ecc') ax.set_ylabel('# of planets at ecc') plt.show() # ### Exercise # Pick another variable beside eccentricity and redo this. # # Other things to try: # * plot other variables - like an x/y plot # * try: x = planets['pl_orbeccen']; y = ['pl_orbper'] # * then: ax.plot(x,y) # * make colors a variable # * plot multiple lines planets['gaia_dist'].shape # !conda list # + # animation.FuncAnimation? # + ax[1].set_xlim(r.min(), r.max()) ax[1].set_xlim(r[:,1,:].min(), r[:,1,:].max()) # for y on the horizontal axes
_site/lesson06/inClass_lesson06.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build "feels like" temperature features for the raw weather data.

import math

import pandas as pd
from meteocalc import feels_like, Temp, dew_point, wind_chill, heat_index

weather_train = pd.read_csv("../data/raw/weather_train.csv")

weather_train.head()

weather_train.shape

# Wrap the raw Celsius readings in meteocalc Temp objects (exploratory check).
temp_format = weather_train.air_temperature.apply(lambda x: Temp(x, 'c'))

weather_train.loc[1, ]["air_temperature"]


def compute_humidity(row):
    """Return relative humidity (%) from a row's air and dew-point temperature.

    Uses the Magnus formula with different coefficients for
    above- and below-freezing air temperatures.
    """
    # Magnus-formula coefficients; the split at 0 degC improves accuracy.
    CONSTANTS = dict(
        positive=dict(b=17.368, c=238.88),
        negative=dict(b=17.966, c=247.15),
    )
    T = row["air_temperature"]
    const = CONSTANTS['positive'] if T > 0 else CONSTANTS['negative']
    dp = row["dew_temperature"]
    # Vapour pressure at the dew point relative to saturation at T.
    pa = math.exp(dp * const['b'] / (const['c'] + dp))
    return pa * 100. / math.exp(const['b'] * T / (const['c'] + T))


# Pass the function directly instead of wrapping it in a redundant lambda.
weather_train["relative_humidity"] = weather_train.apply(compute_humidity, axis=1)

weather_train.head()

max(weather_train.relative_humidity)

# Sanity check: recover the dew point from the derived humidity.
dew_point(temperature=weather_train["air_temperature"][1],
          humidity=weather_train["relative_humidity"][1]).f

# meteocalc's feels_like expects Fahrenheit input.
weather_train["air_temp_f"] = weather_train["air_temperature"] * 9 / 5. + 32


def feels_like_custom(row):
    """Return the "feels like" temperature in Celsius for one weather row.

    Combines Fahrenheit air temperature, wind speed and relative humidity
    via meteocalc.feels_like (heat index / wind chill selection).
    """
    fl = feels_like(row["air_temp_f"], row["wind_speed"], row["relative_humidity"])
    return fl.c


weather_train["feels_like_temp"] = weather_train.apply(feels_like_custom, axis=1)
notebooks/10-dl-feels-like.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import pandas as pd import os from utils.training import plot_history # - # experiments = !ls -1 ./checkpoints print(*['%3i %s' % (i, v) for i, v in enumerate(experiments.list)], sep='\n') experiments = [ '201809231008_sl512_synthtext', # SegLink '201806021007_dsodsl512_synthtext', # SegLink with DenseNet and Focal Loss '201807091503_dsodtbpp512fl_synthtext', # TextBoxes++ with DennseNet and Focal Loss '202003070004_dstbpp512fl_synthtext', # TextBoxes++ with Dense Blocks, Separable Convolution and Focal Loss ] # ### History # + # experiments = !ls -1 ./checkpoints names = [ 'loss', # SSD 'conf_loss', 'loc_loss', #'pos_conf_loss', 'neg_conf_loss', 'pos_loc_loss', 'precision', 'recall', 'fmeasure', # SegLink 'seg_precision', 'seg_recall', 'seg_fmeasure', #'link_precision', 'link_recall', 'link_fmeasure', #'pos_seg_conf_loss', 'neg_seg_conf_loss', 'seg_loc_loss', 'pos_link_conf_loss', 'neg_link_conf_loss', #'num_pos_seg', 'num_neg_seg', 'num_pos_link', 'num_neg_link', #'seg_conf_loss', 'seg_loc_loss', 'link_conf_loss', ] #names = None plot_history(experiments[-4:], names) # - # ### Playground
plot_history.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # JOHNSTON - 67-Markers Data Set v.7a

# +
# Copyright (C) 2018 <NAME>. All rights reserved.
# Notebook to produce a Phylogenetic tree (dendrogram) of the Johnston Y-DNA kit results
# Based on the work done by <NAME> and <NAME>

# %matplotlib inline
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.stats import pearsonr
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from matplotlib import pyplot as plt
import math

I = 'JOHNSTON - 67-Markers Data Set-v.7a'
# Calibration: one RCC unit corresponds to this many years.
Rscale = 34.65

# read the CSV of kit results (the comment previously said "PDF" in error)
C = pd.read_csv("{input}.csv".format(input=I)).values

# extract the kits as rows of observations and columns of attributes (markers)
K = C[:, 2:69]

# extract the kit numbers and names to produce a labels matrix
L = []
for ix, row in enumerate(C):
    L.append("#{ix} {kit} {name}".format(ix=ix + 1, kit=row[0], name=row[1]))

# calculate a condensed distance matrix consisting of RCC values as per Bill Howards paper
# the condensed distance matrix is a single dimension matrix containing
# the top triangle of a two dimensional distance matrix
# e.g. [1,2,3,4]
#      [2,1,2,3]
#      [3,2,1,2]
#      [4,3,2,1]
# becomes a condensed distance matrix Y of [2,3,4,2,3,2]
Y = []
X = []
for i1, v1 in enumerate(K):
    X1 = []
    for i2, v2 in enumerate(K):
        if i2 > i1:
            # RCC between the two kits; compute the Pearson correlation
            # once instead of twice (it was evaluated separately for Y and X).
            rcc = (1 / pearsonr(v1, v2)[0] - 1) * 10000.0
            Y.append(rcc)
            X1.append(rcc)
    X.append(X1)

# perform the agglomerative clustering; note the code uses the 'weighted'
# (WPGMA) linkage method (the old comment incorrectly said 'average')
Z = linkage(Y, method='weighted', optimal_ordering=False)

# perform linkage reordering such that the shorter branch is first, the longer branch second
# the lower index is first, the higher index is second
for link in Z:
    leftDepth = link[0]
    if leftDepth >= len(L):
        leftDepth = Z[int(leftDepth) - len(L), 3]
    else:
        leftDepth = 1
    rightDepth = link[1]
    if rightDepth >= len(L):
        rightDepth = Z[int(rightDepth) - len(L), 3]
    else:
        rightDepth = 1
    if leftDepth < rightDepth:
        # tuple swap replaces the manual temp-variable shuffle
        link[0], link[1] = link[1], link[0]
    elif link[0] < link[1] and link[0] < len(L) and link[1] < len(L):
        link[0], link[1] = link[1], link[0]

# plot the cluster hierarchy produced by linkage as a dendrogram
F = plt.figure(figsize=(16, 20), dpi=72)  # A1 paper
plt.title(I)
plt.xlabel("RCC")
plt.grid(True, which='major', axis='x', color='g', linestyle='dashed')
plt.minorticks_on()
plt.tick_params(axis='x', which='minor')
plt.tick_params(axis='y', which='minor', length=0)
plt.xticks(np.arange(24))
D = dendrogram(Z, labels=L, color_threshold=3.5, leaf_font_size=12,
               leaf_rotation=0, orientation='left')

# annotate each internal node with an estimated calendar year and its RCC value
for i, d, c in zip(D['icoord'], D['dcoord'], D['color_list']):
    y = 0.5 * sum(i[1:3])
    x = d[1]
    if x > 0:
        plt.plot(x, y, 'o', c=c)
        # convert RCC to an estimated year, rounded down to the decade
        yr = math.floor((1943 - int(x * Rscale)) / 10) * 10
        if yr >= 0:
            yr_txt = "{yr}AD".format(yr=yr)
        else:
            yr_txt = "{yr}BC".format(yr=-yr)
        rcc_txt = int(x * 10) / 10
        plt.annotate("%s" % yr_txt, (x, y), xytext=(-6, 0),
                     textcoords='offset points',
                     color='r', va='center', ha='center', rotation=90)
        plt.annotate("%s" % rcc_txt, (x, y), xytext=(+7, 0),
                     textcoords='offset points',
                     color='r', va='center', ha='center', rotation=90)

plt.annotate("RCC = {rscale} years".format(rscale=Rscale), (0, 0), xytext=(0, -5))
F.subplots_adjust(left=0.05, right=0.85, top=0.97, bottom=0.05)
plt.savefig("{input}.jupyter.png".format(input=I))

# cite: http://www.jogg.info/pages/72/files/Howard.htm
# Dating Y-DNA Haplotypes on a Phylogenetic Tree: Tying the Genealogy of Pedigrees and Surname Clusters into Genetic Time Scales
# <NAME> and <NAME>
# -
jupyter/JOHNSTON - 67-Markers Data Set-v.7a.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (pytorch)
#     language: python
#     name: pytorch
# ---

# # Federated learning: using a TensorFlow model
#
# This notebook is a copy of the notebook [Federated learning basic concepts](./federated_learning_basic_concepts.ipynb). The difference is that, here, the model is built by defining a custom layer. However, apart from that, the structure is identical so the text has been removed for clearness. Please refer to the original notebook for the detailed description of the experiment.
#
# ## The data

# +
import shfl

# Load the EMNIST dataset and print a few basic facts about it.
database = shfl.data_base.Emnist()
train_data, train_labels, test_data, test_labels = database.load_data()

print(len(train_data))
print(len(test_data))
print(type(train_data[0]))
train_data[0].shape

import matplotlib.pyplot as plt

plt.imshow(train_data[0])

# Split the data IID across 20 simulated nodes (10% of the data in total).
iid_distribution = shfl.data_distribution.IidDataDistribution(database)
federated_data, test_data, test_label = iid_distribution.get_federated_data(num_nodes=20, percent=10)

print(type(federated_data))
print(federated_data.num_nodes())
federated_data[0].private_data
# -

# ## The model

# +
import tensorflow as tf

# To execute on GPU, uncomment these two lines.
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)

class CustomDense(tf.keras.layers.Layer):
    """Fully-connected (linear) Keras layer implemented from scratch.

    Attributes
    ----------
    units : int
        Number of output units.
    w : matrix
        Layer weights (created lazily in `build`).
    b : array
        Layer bias (created lazily in `build`).
    """
    def __init__(self, units=32, **kwargs):
        super(CustomDense, self).__init__(**kwargs)
        self._units = units

    def get_config(self):
        # Required so the layer can be serialized/cloned by Keras.
        config = {'units': self._units}
        base_config = super(CustomDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        """Create the weight and bias variables.

        Parameters
        ----------
        input_shape : list
            Shape of the layer input; the last dimension sizes the kernel.
        """
        self._w = self.add_weight(shape=(input_shape[-1], self._units),
                                  initializer='random_normal',
                                  trainable=True)
        self._b = self.add_weight(shape=(self._units,),
                                  initializer='random_normal',
                                  trainable=True)

    def call(self, inputs):
        """Apply the linear transformation.

        Parameters
        ----------
        inputs : matrix
            Input data.

        Returns
        -------
        matrix
            Result of `inputs @ w + b`.
        """
        return tf.nn.bias_add(tf.matmul(inputs, self._w), self._b)


def model_builder():
    # Small CNN for 28x28x1 images whose dense head uses CustomDense.
    inputs = tf.keras.Input(shape=(28, 28, 1))
    x = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1)(inputs)
    x = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')(x)
    x = tf.keras.layers.Dropout(0.4)(x)
    x = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1)(x)
    x = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')(x)
    x = tf.keras.layers.Flatten()(x)
    x = CustomDense(128)(x)
    x = tf.nn.relu(x)
    x = tf.keras.layers.Dropout(0.1)(x)
    x = CustomDense(64)(x)
    x = tf.nn.relu(x)
    x = CustomDense(10)(x)
    outputs = tf.nn.softmax(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    criterion = tf.keras.losses.CategoricalCrossentropy()
    optimizer = tf.keras.optimizers.RMSprop()
    metrics = [tf.keras.metrics.categorical_accuracy]

    return shfl.model.DeepLearningModel(model=model, criterion=criterion, optimizer=optimizer, metrics=metrics)
# -

# Federated averaging of the node models.
aggregator = shfl.federated_aggregator.FedAvgAggregator()
federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, aggregator)

# +
import numpy as np


class Reshape(shfl.private.FederatedTransformation):
    # Add the trailing channel dimension expected by the Conv2D input.
    def apply(self, labeled_data):
        labeled_data.data = np.reshape(labeled_data.data, (labeled_data.data.shape[0], labeled_data.data.shape[1], labeled_data.data.shape[2], 1))


class CastFloat(shfl.private.FederatedTransformation):
    # Cast node data to float32 for TensorFlow.
    def apply(self, labeled_data):
        labeled_data.data = labeled_data.data.astype(np.float32)


shfl.private.federated_operation.apply_federated_transformation(federated_data, Reshape())
shfl.private.federated_operation.apply_federated_transformation(federated_data, CastFloat())
# -

# ## Run the federated learning experiment

# Apply the same reshape/cast to the held-out test set.
test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2], 1))
test_data = test_data.astype(np.float32)

federated_government.run_rounds(3, test_data, test_label)
notebooks/federated_learning/federated_learning_basic_concepts_tensorflow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Timing benchmark: linear regression trained with TensorFlow eager
# execution (implicit value-and-gradients) on synthetic data.

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import time
import numpy as np  # was missing: `np` is used below to build the data

tfe.enable_eager_execution()

# Synthetic linear data: y = X @ theta, plus a bias of 1 and Gaussian noise.
X = np.random.rand(1000, 3)
theta = np.array([2, 3, 4]).reshape(3, 1)
y = X @ theta
y_err = y + 1 + np.random.randn(1000, 1)

# Trainable parameters (float64 to match the numpy inputs).
w = tf.get_variable(name='w', shape=(3, 1), dtype=tf.double)
b = tf.get_variable(name='b', shape=(1, ), dtype=tf.double)


@tfe.implicit_value_and_gradients
def foo(x, y):
    """Return the MSE loss of the linear model; the decorator adds gradients."""
    return tf.losses.mean_squared_error(y, tf.matmul(x, w) + b)


optimizer = tf.train.GradientDescentOptimizer(0.1)
ds = tf.data.Dataset.from_tensor_slices((X, y_err)).shuffle(1000).batch(50)

begin = time.time()
for epoch in range(20):
    for step, (x, y) in enumerate(tfe.Iterator(ds)):
        loss, grad = foo(x, y)
        optimizer.apply_gradients(grad)
        # print(f'step:{step}; loss:{loss.numpy()}')

# Report the final full-batch loss once after training.  This replaces the
# confusing `for/else` + `if epoch == 19` construct, which only ever fired
# on the last epoch anyway; the printed output is identical.
loss, grad = foo(X, y_err)
print(f'epoch: {epoch}; loss: {loss.numpy()}')

end = time.time()
print(end - begin)
testzie/gradient_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Setup

# ## Imports

from tqdm import tqdm_notebook
import gym

# PyTorch modules

# +
import numpy as np  # was missing: np.zeros/np.arange/np.float32 are used below
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

from vai.torch.utils import cuda
# -

# ## Define Useful Features

env = gym.make('Pong-ram-v0')
n = env.observation_space.shape[0]  # observation (RAM) size
a = env.action_space.n              # number of discrete actions

# # Create Model

class Policy(nn.Module):
    """Maps a RAM observation to per-action scores for `action_size` steps."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(n, 32 * 4)
        self.bn1 = nn.BatchNorm1d(32)
        self.conv1 = nn.ConvTranspose1d(32, a, 3, 2, padding=2, output_padding=1)

    def forward(self, x):
        x = F.relu(self.bn1(self.fc(x).view(-1, 32, 4)))
        return self.conv1(x)

def get_policy(scores):
    # Greedy action per time step.  NOTE(review): F.softmax without a `dim`
    # argument relies on old (deprecated) default behaviour -- confirm the
    # installed torch version.
    return F.softmax(torch.transpose(scores, 0, 1)).max(1)[1]

policy = cuda(Policy())
optimizer = optim.Adam(policy.parameters())

# Number of actions predicted from a single observation.
action_size = 6

prog_bar = tqdm_notebook(range(1000))
for epoch in prog_bar:
    env.reset()
    done = False
    obs = env.observation_space.sample()
    last_obs = cuda(torch.from_numpy(obs.astype(np.float32)))
    scores = policy(Variable(last_obs).unsqueeze(0)).squeeze(0)
    actions = get_policy(scores)
    action_roll = actions.data.cpu().numpy()
    time_step = 0
    total_reward = 0
    epoch_reward = 0
    while not done:
        if time_step == action_size:
            # Push scores away from the taken actions in proportion to the
            # (negated) reward collected over the last action_size steps.
            grads = np.zeros((a, action_size), np.float32)
            grads[action_roll, np.arange(action_size)] = -total_reward
            new_scores = policy(Variable(last_obs, volatile=True).unsqueeze(0)).squeeze(0)
            # Keep stepping until the greedy policy for this observation changes.
            while (get_policy(new_scores) == actions).data.all():
                optimizer.zero_grad()
                scores.backward(cuda(torch.from_numpy(grads)), retain_graph=True)
                optimizer.step()
                new_scores = policy(Variable(last_obs, volatile=True).unsqueeze(0)).squeeze(0)
            # Flush the graph with a zero gradient before moving on.
            grads = np.zeros((a, action_size), np.float32)
            optimizer.zero_grad()
            scores.backward(cuda(torch.from_numpy(grads)))
            optimizer.step()
            # Re-plan the next action_size actions from the latest observation.
            last_obs = cuda(torch.from_numpy(obs.astype(np.float32)))
            scores = policy(Variable(last_obs).unsqueeze(0)).squeeze(0)
            actions = get_policy(scores)
            action_roll = actions.data.cpu().numpy()
            time_step = 0
            total_reward = 0
        #env.render()
        obs, r, done, _ = env.step(action_roll[time_step])
        total_reward += r
        # NOTE(review): this accumulates the *running* total every step, so
        # earlier rewards are counted repeatedly -- confirm this weighting
        # is intended rather than `epoch_reward += r`.
        epoch_reward += total_reward
        time_step += 1
    prog_bar.desc = str(int(epoch_reward))
    epoch_reward = 0

# +
# Evaluation rollout: same action-replanning loop, but without any updates.
env.reset()
done = False
obs = env.observation_space.sample()
last_obs = cuda(torch.from_numpy(obs.astype(np.float32)))
scores = policy(Variable(last_obs, volatile=True).unsqueeze(0)).squeeze(0)
actions = get_policy(scores)
action_roll = actions.data.cpu().numpy()
time_step = 0
total_reward = 0
epoch_reward = 0
while not done:
    if time_step == action_size:
        last_obs = cuda(torch.from_numpy(obs.astype(np.float32)))
        scores = policy(Variable(last_obs, volatile=True).unsqueeze(0)).squeeze(0)
        actions = get_policy(scores)
        action_roll = actions.data.cpu().numpy()
        time_step = 0
        total_reward = 0
    env.render()
    obs, r, done, _ = env.step(action_roll[time_step])
    total_reward += r
    epoch_reward += total_reward
    time_step += 1

print(epoch_reward)
env.render(close=True)
Exploration/RL/ConvPolicy/Notebooks/PG.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys import purly # To set things up we'll need to stand up our model server. By default the server has a max refresh rate of `25` hertz. However for this example to be convincingly smooth we'll want to bump that up to about `60`. If you want to unlock the refresh rate set `refresh=None`. We'll then hook up a layout object to a model resource (see the introductory example or [read the docs](https://github.com/rmorshea/purly#purly) if this doesn't make sense). # + from example_utils import localhost # increase model server refresh cap to 60 hertz. purly.state.Machine(refresh=60).daemon() # name the layout resource "simple-slider" and connect to the update stream websocket_url = localhost('ws', 8000) + '/model/simple-slider/stream' layout = purly.Layout(websocket_url) # - # We'll then create a simple slider with values between 1 and 10 and add it to the layout before displaying. slider = layout.html('input', type='range', value="5", min=1, max=10, step=1) layout.children.append(slider) layout.sync() layout # You should now see the slider in the output above! # # However when you try to move it, nothing happens, so we'll need to hook into some mouse events to make the display animate. To do that requires the `onChange` event which can be captured via the `on` decorator of the `slider` element. The one detail that significant is passing the string `'value'` as a second argument to the decorator. This will sync the slider's value between Python and the browser when the event occurs. # # Once we've done that we can simply print out the now synced `slider.attributes['value']` as it changes. @slider.on('Change', 'value') def printer(): sys.stdout.write('\r%02d' % int(slider.attributes['value'])) sys.stdout.flush() layout.serve()
examples/slider.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Plotting the NASA PGA data on an actual map
#
# Last time we looked at the NASA PGA data, we only plotted the data itself but didn't project it onto a map. This notebook will show you how to do that.
#
# In this exercise we will plot the Global Earthquake Hazard Distribution - Peak Ground Acceleration 1976-2002. This data was collected by NASA and you can download it for [free.](http://sedac.ciesin.columbia.edu/data/set/ndh-earthquake-distribution-peak-ground-acceleration/data-download#close) You will need to unzip the file you download.
#
# We have provided this data in the data folder as a zip file for ease of use. You will need to unzip the file to access the data.
#
# (You don't need to do this step if you already downloaded the data.)

# +
# %matplotlib inline
import pandas as pd
import numpy as np
import cartopy
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
# -

# # Import and mask the data
#
# In the first lesson on heat maps we talked about importing the data and masking it, here we do that step again.

data_location = '../data/gdpga/gdpga.asc'
# The .asc grid has a 6-line header; the grid itself is 8640 columns wide.
df = pd.read_csv(data_location, skiprows=6, delim_whitespace=True, names=np.arange(8640))
# Negative cells mean "no data" -- mask them so they are not drawn.
masked_data = np.ma.masked_where(df.values<0,df.values)
df.head()

# +
fig, ax = plt.subplots(1, figsize=(15,7))
proj = ccrs.Robinson() # the projection you want to use, many are supported
ax = plt.axes(projection=proj) # sets the projection

ax.add_feature(cartopy.feature.OCEAN # add ocean feature
               , zorder=0 # layer where ocean should plot; 0 is base
               , facecolor='w' # color of the ocean
              )

ax.add_feature(cartopy.feature.LAND # add continent feature
               , zorder=0 # layer where continent should plot; 0 is base
               , edgecolor='black' # coastline color
               , facecolor='lightgray' # continent color
              )

ax.coastlines(resolution='110m') # the level of resolution to render the drawings,
                                 # see documentation for more details

# NOTE: this first attempt is deliberately wrong -- the latitude range and
# orientation do not match the dataset; see the explanation below.
lons = np.linspace(-180, 180, 8640)
lats = np.linspace(-90, 90, 3432)
lons, lats = np.meshgrid(lons, lats)
ax.pcolormesh(lons, lats, masked_data, vmin=0, vmax=10, zorder=20, transform=ccrs.PlateCarree())
# -

# # Well that looks wrong, What happened?
#
# We know what this map should look like since NASA tells us:
#
# ![actual map](http://i.imgur.com/JlORDk2.png)
#
# ## So what went wrong?
#
# 1. it looks like the latitude coordinates are flipped.
# 2. it looks stretched across the latitude axis
#
# Going to the [NASA website](http://sedac.ciesin.columbia.edu/data/set/ndh-earthquake-distribution-peak-ground-acceleration/metadata) we see that it lists the bounding coordinates as:
#
#     Bounding Coordinates:
#     West Bounding Coordinate: -180.000000
#     East Bounding Coordinate: 180.000000
#     North Bounding Coordinate: 85.000000
#     South Bounding Coordinate: -58.000000
#
# so flipping the sign on the `lats = np.linspace` and changing the bounding size should do the trick.

# +
fig, ax = plt.subplots(1, figsize=(15,7))
proj = ccrs.Robinson() # the projection you want to use, many are supported
ax = plt.axes(projection=proj) # sets the projection

ax.add_feature(cartopy.feature.OCEAN # add ocean feature
               , zorder=0 # layer where ocean should plot; 0 is base
               , facecolor='w' # color of the ocean
              )

ax.add_feature(cartopy.feature.LAND # add continent feature
               , zorder=0 # layer where continent should plot; 0 is base
               , edgecolor='black' # coastline color
               , facecolor='lightgray' # continent color
              )

ax.coastlines(resolution='110m') # the level of resolution to render the drawings,
                                 # see documentation for more details

# Latitudes run north-to-south (85 down to -58) to match the grid row order.
lons = np.linspace(-180, 180, 8640)
lats = np.linspace(85, -58, 3432)
lons, lats = np.meshgrid(lons, lats)
ax.pcolormesh(lons, lats, masked_data, vmin=0, vmax=10, zorder=20, transform=ccrs.PlateCarree())
# -

# # Yay it worked!
#
# But what if we want a different colormap? Or a colorbar to indicate the values?
#
# Matplotlib has a reference for colormaps:
#
# http://matplotlib.org/examples/color/colormaps_reference.html
#
# This is a great link to remember because you will use it a lot.
#
# Using the code here we can show below how to set a different colormap and create a colorbar.

# +
fig, ax = plt.subplots(1, figsize=(15,7))
proj = ccrs.Robinson() # the projection you want to use, many are supported
ax = plt.axes(projection=proj) # sets the projection

ax.add_feature(cartopy.feature.OCEAN # add ocean feature
               , zorder=0 # layer where ocean should plot; 0 is base
               , facecolor='w' # color of the ocean
              )

ax.add_feature(cartopy.feature.LAND # add continent feature
               , zorder=0 # layer where continent should plot; 0 is base
               , edgecolor='black' # coastline color
               , facecolor='lightgray' # continent color
              )

ax.coastlines(resolution='110m') # the level of resolution to render the drawings,
                                 # see documentation for more details

lons = np.linspace(-180, 180, 8640)
lats = np.linspace(85, -58, 3432)
lons, lats = np.meshgrid(lons, lats)
# Keep the QuadMesh handle so it can feed the colorbar below.
cbar = ax.pcolormesh(lons, lats, masked_data, vmin=0, vmax=10, zorder=20, transform=ccrs.PlateCarree(),cmap='gnuplot2')
fig.colorbar(cbar, label='PGA', fraction=0.2, shrink=0.6, pad=0)
# -

# # Looks like it worked!
#
# But You don't always have data that is shaped just for you by NASA. Sometimes our data is calculated from an earthquake catalog. Next notebook we will start with the ANSS catalog, calculate some statistics, and grid them like the NASA PGA catalog data and then plot them on a map.
example_notebooks/cartopy/Plotting heat map data on a map using cartopy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Camera-Basic-Recognition
# The system must identify the exam sheets to be analysed, regardless of
# their position.  (Translated from the original Portuguese comment.)

import sys


def identify_alternative(image):
    """Detect answer bubbles (circles) in the frame and draw them."""
    try:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.medianBlur(gray, 5)
        rows = gray.shape[0]
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 10,
                                   param1=50, param2=30, minRadius=0, maxRadius=30)
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                center = (i[0], i[1])
                # centre point
                cv2.circle(image, center, 1, (255, 0, 0), 3)
                # outer circle
                radius = i[2]
                cv2.circle(image, center, radius, (0, 255, 0), 3)
        cv2.imshow("detected circles", image)
    except Exception as err:
        # Was a bare `except:`, which silently swallowed *everything*
        # (including KeyboardInterrupt).  Report the error, then shut down.
        print("circle detection failed: %s" % err, file=sys.stderr)
        cv2.destroyAllWindows()
        sys.exit(0)


# Edge detection: bounding rectangle of the largest contour.
def identify_test(image):
    """Outline the sheet (largest contour), then look for its bubbles."""
    gray = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, 1)
    # NOTE(review): the 3-value unpacking matches OpenCV 3.x; OpenCV 4.x
    # returns only (contours, hierarchy) -- confirm the installed version.
    _, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Keep only the largest contour by area (assumed to be the sheet).
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    identify_alternative(image)


def camera_processing():
    """Grab frames from the default camera until 'q' is pressed."""
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        identify_test(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


# +
import numpy as np
import imutils
import sys
import cv2

camera_processing()

cv2.waitKey(0)
cv2.destroyAllWindows()
lessons/5 - Camera-Basic-Recognition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Úkol č. 1 - vizualizace dat a web scraping (do 20. října) # # * V rámci tohoto úkolu musíte stáhnout dat z webu (tzv. _web scraping_, velmi základní) a následně data zpracovat a vizualizovat. # * Cílem bude stáhnout data ze serveru https://www.volby.cz týkající se voleb do zastupitelstva Vámi vybraného (většího) města, uložit data o závěrečných pracích v tabulkovém formátu a pak vymyslet vizualizace a zobrazení dat, které umožní orientaci v těchto datech a zvýrazní zajímavé informace a zobrazit přehledně časový vývoj různých veličin. # # > **Úkoly jsou zadány tak, aby Vám daly prostor pro invenci. Vymyslet _jak přesně_ budete úkol řešit, je důležitou součástí zadání a originalita či nápaditost bude také hodnocena!** # # ## Výběr zdroje dat # # Vyberte si větší město, které má zastupitelstvo druhu 3 (Zastupitelstvo statutárního města) a strojově stáhněte informace o stranách a kandidátkách z následujících let: # * [2002](https://www.volby.cz/pls/kv2002/kv12?xjazyk=CZ&xid=0), [2006](https://www.volby.cz/pls/kv2006/kv12?xjazyk=CZ&xid=0), [2010](https://www.volby.cz/pls/kv2010/kv12?xjazyk=CZ&xid=0), [2014](https://www.volby.cz/pls/kv2014/kv12?xjazyk=CZ&xid=0) a [2018](https://www.volby.cz/pls/kv2018/kv12?xjazyk=CZ&xid=0). # # # ## Pokyny k vypracování # # **Základní body zadání**, za jejichž (poctivé) vypracování získáte **8 bodů**: # * Strojově stáhněte data pro vybrané město a uložte je všechny do (asi dvou) přehledných tabulek ve formátu _csv_. # * Data musí obsahovat _alespoň_ toto: # * Vývoj výsledků (v procentech i počtu hlasů) pro jednotlivé strany v jednotlivých letech. 
# * Seznam všech kandidátů všech stran v jednotlivých letech, u kandidáta by mělo být zaznamenáno: jméno, věk v době voleb, navrhující strana, politická příslušnost, volební zisk (procento i počet hlasů), pořadí na kandidátce, pořadí zvolení, jestli získal mandát (tyto informace získáte souhrnně ve _jmenných seznamech_). # * V druhé části Vašeho Jupyter notebooku pracujte s těmito tabulkami načtenými z _csv_ souboru (aby opravující nemusel spouštět stahování z webu). # * Tabulky ve formátu _csv_ také odevzdejte. # * S využitím vybraných nástrojů zpracujte data a vymyslete vizualizace a grafy, aby bylo vidět následující: # * Časový vývoj (po rocích voleb) počtu kandidujících stran i lidí a to celkově i po jednotlivých stranách (ve volbách, kterých se daná strana účastnila). # * Věkovou strukturu kandidátů celkově i za jednotlivé strany a vývoj této struktury během jednotlivých voleb. # * Časový vývoj volební účasti a volebních výsledků jednotlivých stran. # * Časový vývoj podílu kandidujících s titulem a bez titulu. # # **Další body zadání** za případné další body (můžete si vybrat, maximum bodů za úkol je každopádně 12 bodů): # * (až +2 body) U titulů se pokuste rozlišit i různé stupně vzdělání: bakalářský, magisterský, doktorský a vyšší, vojenská hodnost atp. Zkuste odhadnout i podíl žen na kandidátkách. # * (až +4 body) Pokuste se u jednotlivých kandidátů zjistit, zda kandidovali ve více volbách. Najděte 10 nejpilnějších kandidátů a vypište jejich volební zisky a za jaké strany kandidovali. # * (až +2 body) Najděte nějaký balíček, který Vám dovolí do Vašeho notebooku zavést interaktivní prvky, např. si vyberete v select-boxu stranu a Váš notebook zobrazí grafy pouze pro ni atp. # # ## Poznámky k odevzdání # # * Řiďte se pokyny ze stránky https://courses.fit.cvut.cz/BI-VZD/homeworks/index.html. # * Odevzdejte nejen Jupyter Notebook, ale i _csv_ soubor(y) se staženými daty. # * Opravující Vám může umožnit úkol dodělat či opravit a získat tak další body. 
První verze je ale důležitá a bude-li odbytá, budete za to penalizováni. # ## Řešení # imports import numpy as np import pandas as pd import matplotlib as mpl import seaborn as sns from ipywidgets import widgets, interactive # %matplotlib inline # ## Stáhnutí tabulek pro každý rok pro město České Budějovice # * Pro každý rok stáhneme tři tabulky: tabulka se základními daty (voliči, účast, ...), tabulka stran, tabulka kandidátů # ### 2002 # + # election results url = "https://www.volby.cz/pls/kv2002/kv1111?xjazyk=CZ&xid=0&xdz=3&xnumnuts=3101&xobec=544256" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=".") elections_2002 = dfs[0] parties_2002 = dfs[1] # list of names url = "https://www.volby.cz/pls/kv2002/kv21111?xjazyk=CZ&xid=0&xv=11&xdz=3&xnumnuts=3101&xobec=544256&xstrana=0" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=".") candidates_2002 = dfs[0] # - # ### 2006 # + # election results url = "https://www.volby.cz/pls/kv2006/kv1111?xjazyk=CZ&xid=0&xdz=3&xnumnuts=3101&xobec=544256" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") elections_2006 = dfs[0] parties_2006 = dfs[1] # list of names url = "https://www.volby.cz/pls/kv2006/kv21111?xjazyk=CZ&xid=0&xv=11&xdz=3&xnumnuts=3101&xobec=544256&xstrana=0" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") candidates_2006 = dfs[0] # - # ### 2010 # + # election results url = "https://www.volby.cz/pls/kv2010/kv1111?xjazyk=CZ&xid=0&xdz=3&xnumnuts=3101&xobec=544256" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") elections_2010 = dfs[0] parties_2010 = dfs[1] # list of names url = "https://www.volby.cz/pls/kv2010/kv21111?xjazyk=CZ&xid=0&xv=11&xdz=3&xnumnuts=3101&xobec=544256&xstrana=0" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") candidates_2010 = dfs[0] # - # ### 2014 # + # election results url = 
"https://www.volby.cz/pls/kv2014/kv1111?xjazyk=CZ&xid=0&xdz=3&xnumnuts=3101&xobec=544256" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") elections_2014 = dfs[0] parties_2014 = dfs[1] # list of names url = "https://www.volby.cz/pls/kv2014/kv21111?xjazyk=CZ&xid=0&xv=11&xdz=3&xnumnuts=3101&xobec=544256&xstrana=0" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") candidates_2014 = dfs[0] # - # ### 2018 # + # election results url = "https://www.volby.cz/pls/kv2018/kv1111?xjazyk=CZ&xid=0&xdz=3&xnumnuts=3101&xobec=544256" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") elections_2018 = dfs[0] parties_2018 = dfs[1] # list of names url = "https://www.volby.cz/pls/kv2018/kv21111?xjazyk=CZ&xid=0&xv=11&xdz=3&xnumnuts=3101&xobec=544256&xstrana=0" dfs = pd.read_html(url, flavor="html5lib", thousands=u"\xa0", decimal=",") candidates_2018 = dfs[0] # - # ## Úpravy tabulek do správného formátu, spojení a uložení do CSV # ### Tabulka se základními informacemi # + # rename columns elections_cols = ["Počet volených členů zastupitelstva", "Počet volebních obvodů", "Počet okrsků", "Počet zpracovaných okrsků", "Počet zpracovaných okrsků v %", "Zapsaní voliči", "Vydané obálky", "Volební účast v %", "Odevzdané obálky", "Platné hlasy"] elections_2002.columns = elections_cols elections_2006.columns = elections_cols elections_2010.columns = elections_cols elections_2014.columns = elections_cols elections_2018.columns = elections_cols # add corresponding year elections_2002["Rok"] = 2002 elections_2006["Rok"] = 2006 elections_2010["Rok"] = 2010 elections_2014["Rok"] = 2014 elections_2018["Rok"] = 2018 # concatenate elections = pd.concat([elections_2002, elections_2006, elections_2010, elections_2014, elections_2018], ignore_index=True) # - # ### Tabulka se stranami # + # rename columns parties_cols = ["Číslo", "Název", "Hlasy abs.", "Hlasy v %", "Počet kandidátů", "Přepočtený základ dle počtu kandidátů", "Přepočtené 
% platných hlasů", "Počet mandátů", "Podíly hlasů"] parties_2002.columns = parties_cols parties_2006.columns = parties_cols parties_2010.columns = parties_cols parties_2014.columns = parties_cols parties_2018.columns = parties_cols # add corresponding year parties_2002["Rok"] = 2002 parties_2006["Rok"] = 2006 parties_2010["Rok"] = 2010 parties_2014["Rok"] = 2014 parties_2018["Rok"] = 2018 # concatenate parties = pd.concat([parties_2002, parties_2006, parties_2010, parties_2014, parties_2018], ignore_index=True) # drop unnecessary column 'Podíly hlasů' parties = parties.drop("Podíly hlasů", axis=1) # - # ### Tabulka s kandidáty # + # rename columns candidates_2002_cols = ["Číslo", "Název", "Pořadové číslo", "Příjmení, jméno", "Tituly", "Věk", "Navrhující strana", "Politická příslušnost", "Hlasy abs.", "Hlasy v %", "Pořadí zvolení/náhradníka", "Mandát"] candidates_cols = ["Číslo", "Název", "Pořadové číslo", "Příjmení, jméno, tituly", "Věk", "Navrhující strana", "Politická příslušnost", "Hlasy abs.", "Hlasy v %", "Pořadí zvolení/náhradníka", "Mandát"] candidates_2002.columns = candidates_2002_cols candidates_2006.columns = candidates_cols candidates_2010.columns = candidates_cols candidates_2014.columns = candidates_cols candidates_2018.columns = candidates_cols # add corresponding year candidates_2002["Rok"] = 2002 candidates_2006["Rok"] = 2006 candidates_2010["Rok"] = 2010 candidates_2014["Rok"] = 2014 candidates_2018["Rok"] = 2018 # concatenate candidates tables 2006, 2010, 2014, 2018 candidates_new = pd.concat([candidates_2006, candidates_2010, candidates_2014, candidates_2018], ignore_index=True) # split name and academic degrees to separate columns in candidates tables 2006, 2010, 2014, 2018 # this is actually more complicated than it sounds, people can have more than 2 names def construct(df): out = pd.DataFrame(columns=["Příjmení, jméno", "Tituly"]) degrees = [None, 'RNDr.', 'Bc.', 'Ing.', 'Mgr.', 'RSDr.', 'MUDr.', 'Doc.', 'DiS', 'JUDr.', 'ak.mal.', 'PhDr.', 
'DiS.', 'dipl.', 'doc.', 'MgA.', 'prof.', 'MSc.,', 'ak.', 'MVDr.', 'Jur.', 'PaedDr.', 'Dr.', 'MBA', 'BBS', 'Ph.D.'] for i, r in df.iterrows(): index = 0 for ii, rr in enumerate(r): if rr in degrees: index = ii break jmeno = "" tituly = np.nan for ii, rr in enumerate(r): if ii < index: if jmeno != "": jmeno += " " jmeno += rr elif ii >= index: if rr != None: if not isinstance(tituly, str): tituly = "" if tituly != "": tituly += " " tituly += rr out = out.append({"Příjmení, jméno" : jmeno, "Tituly" : tituly}, ignore_index=True) return out new = construct(candidates_new["Příjmení, jméno, tituly"].str.split(n=0, expand=True)) candidates_new = candidates_new.drop("Příjmení, jméno, tituly", axis=1) candidates_new.insert(3, "Příjmení, jméno", new["Příjmení, jméno"]) candidates_new.insert(4, "Tituly", new["Tituly"]) # format 'Mandát' column to a boolean candidates_2002["Mandát"].replace({'*': True, np.nan: False}, inplace=True) candidates_new["Mandát"].replace({'*': True, '-': False}, inplace=True) candidates_new.fillna(value=np.nan, inplace=True) candidates_new["Pořadí zvolení/náhradníka"] = pd.to_numeric(candidates_new["Pořadí zvolení/náhradníka"], errors="coerce") # concatenate candidates = pd.concat([candidates_2002, candidates_new], ignore_index=True) # format 'Pořadí zvolení/náhradníka' column to int64 (NaN values to zero) candidates["<NAME>/náhradníka"] = candidates["<NAME>/náhradníka"].fillna(0).astype(np.int64) # replace \xa0 with space candidates["<NAME>"] = candidates["<NAME>"].str.replace(u"\xa0", " ") # - # ### Uložení do csv elections.to_csv("elections.csv", sep=";", index=False) parties.to_csv("parties.csv", sep=";", index=False) candidates.to_csv("candidates.csv", sep=";", index=False) # ## Práce s grafy # ### Načtení z CSV # + elections = pd.read_csv("elections.csv", sep=";") parties = pd.read_csv("parties.csv", sep=";") candidates = pd.read_csv("candidates.csv", sep=";") # replace NaN with empty string in academic degrees candidates["Tituly"] = 
candidates["Tituly"].fillna("") # - # ### Počet kandidujících stran v průběhu let party_count_years = parties.groupby("Rok")["Název"].count().to_frame() party_count_years.columns = ["Počet kand. stran"] ax = party_count_years.plot.bar(color="royalblue", legend=False, figsize=(8, 4)) _ = ax.set_ylabel("Počet kandidujících stran") # ### Počet kandidátů ve stranách v jednotlivých letech # + dd1 = widgets.Dropdown( options=[2002, 2006, 2010, 2014, 2018], value=2018, description='Rok: ') def plot_candidate_count(year): df = candidates.copy() df = df[df["Rok"] == year] df = df.groupby("Název")["Příjmení, jméno"].count().to_frame() df.columns = ["Počet kandidátů"] ax = df.plot.bar(color="limegreen", legend=False, figsize=(8, 4), ylim=[0,50]) ax.set_ylabel("Počet kandidátů") interactive(plot_candidate_count, year=dd1) # - # ### Průměrný věk kandidátů ve všech/jednotlivých stranách v průběhu let # + dd2 = widgets.Dropdown( options=['Všechny'] + list(parties['Název'].unique()), value='Všechny', description='Strana: ') def plot_avg_age(party): df = candidates.copy() if party != 'Všechny': df = df[df["Název"] == party] df = df.groupby("Rok")["Věk"].mean().to_frame() df.columns = ["Průměrný věk"] ax = df.plot.bar(color="red", legend=False, ylim=[30, 70], figsize=(8, 4)) ax.set_ylabel("Průměrný věk") interactive(plot_avg_age, party=dd2) # - # ### Volební účast napříč lety voter_turnout = elections[["Rok", "Volební účast v %"]] ax = voter_turnout.plot.line(x="Rok", y="Volební účast v %", marker="o", color="orange", xlim=[2000, 2020], xticks=[2002, 2006, 2010, 2014, 2018], ylim=[30, 45], legend=False, figsize=(8, 4)) _ = ax.set_ylabel("Volební účast v %") # ### Výsledky voleb (celkový přehled) mpl.pyplot.figure(figsize=(17, 4)) sns.heatmap(parties.pivot("Rok", "Název", "Hlasy v %"), cmap="Oranges", cbar_kws={'label': 'Hlasy v %'}) # ### Výsledky voleb (procento hlasů) v konkrétních letech # + dd3 = widgets.Dropdown( options=[2002, 2006, 2010, 2014, 2018], value=2018, 
description='Rok: ') def plot_results(year): df = parties.copy() df = df[df["Rok"] == year][["Název", "Hlasy v %"]] df.plot.pie(y="Hlasy v %", labels=df["Název"].tolist(), legend=None, figsize=(10, 10), autopct="%1.2f%%", label="") interactive(plot_results, year=dd3) # - # ### Procento kandidátů s titulem # + voter_degree_ratio = pd.DataFrame(columns=["Rok", "Podíl kandidujících s titulem a bez titulu"]) for year in range(2002, 2022, 4): df = candidates.copy() df = df[df["Rok"] == year] degree = df[df["Tituly"] != ""].shape[0] voter_degree_ratio = voter_degree_ratio.append({"Rok" : year, "Podíl kandidujících s titulem a bez titulu" : 100*degree/df.shape[0]}, ignore_index=True) voter_degree_ratio = voter_degree_ratio.astype({"Rok" : int, "Podíl kandidujících s titulem a bez titulu" : float}) ax = voter_degree_ratio.plot.line(x="Rok", y="Podíl kandidujících s titulem a bez titulu", marker="o", color="purple", xlim=[2000, 2020], xticks=[2002, 2006, 2010, 2014, 2018], figsize=(8, 4), ylim=[34, 50], legend=False) _ = ax.set_ylabel("Procento kandidátů s titulem") # - # ### Věková struktura ve všech/jednotlivých stranách v konkrétních letech # + dd4 = widgets.Dropdown( options=[2002, 2006, 2010, 2014, 2018], value=2018, description='Rok: ') dd5 = widgets.Dropdown( options=['Všechny'] + list(parties[parties["Rok"] == dd4.value]["Název"].unique()), value='Všechny', continuous_update=True, description='Strana: ') def dd4_on_change(change): if change['type'] == 'change' and change['name'] == 'value': dd5.options = ['Všechny'] + list(parties[parties["Rok"] == dd4.value]["Název"].unique()) dd4.observe(dd4_on_change) def plot_age_structure(year, party): age_structure = pd.DataFrame(columns=["Věková kategorie", "Počet"]) age_categories = {"<20" : (0, 20), "20-30": (20, 30), "30-40" : (30, 40), "40-50" : (40, 50), "50-60" : (50, 60), "60-70" : (60, 70), "70-80" : (70, 80), ">80" : (80, np.inf)} df = candidates.copy() df = df[df["Rok"] == year] if party != 'Všechny': df = 
df[df["Název"] == party] for k, v in age_categories.items(): lo, hi = v df_cat = df.copy() df_cat = df_cat[df_cat["Věk"] > lo] df_cat = df_cat[df_cat["Věk"] < hi] age_structure = age_structure.append({"Věková kategorie" : k, "Počet" : df_cat.shape[0]}, ignore_index=True) ax = age_structure.plot.bar(x="Věková kategorie", legend=False, y="Počet", color="y", figsize=(8, 4)) ax.set_ylabel("Počet") interactive(plot_age_structure, year=dd4, party=dd5) # - # ### Odhad procenta kandidujících žen napříč lety # + women_ratio = pd.DataFrame(columns=["Rok", "Podíl kandidujících žen a mužů"]) for year in range(2002, 2022, 4): df = candidates.copy() df = df[df["Rok"] == year] women = df[df["Příjmení, jméno"].str.match(".*(ová|ná|ská|ická|ká)\s.*")].shape[0] women_ratio = women_ratio.append({"Rok" : year, "Podíl kandidujících žen a mužů" : 100*women/df.shape[0]}, ignore_index=True) women_ratio = women_ratio.astype({"Rok" : int, "Podíl kandidujících žen a mužů" : float}) ax = women_ratio.plot.line(x="Rok", y="Podíl kandidujících žen a mužů", marker="o", color="deeppink", xlim=[2000, 2020], xticks=[2002, 2006, 2010, 2014, 2018], figsize=(8, 4), ylim=[28, 40], legend=False) _ = ax.set_ylabel("Procento kandidujících žen") # - # ### Struktura titulů ve všech/jednotlivých stranách v konkrétních letech # + dd6 = widgets.Dropdown( options=[2002, 2006, 2010, 2014, 2018], value=2018, description='Rok: ') dd7 = widgets.Dropdown( options=['Všechny'] + list(parties[parties["Rok"] == dd6.value]["Název"].unique()), value='Všechny', continuous_update=True, description='Strana: ') def dd6_on_change(change): if change['type'] == 'change' and change['name'] == 'value': dd7.options = ['Všechny'] + list(parties[parties["Rok"] == dd6.value]["Název"].unique()) dd6.observe(dd6_on_change) def plot_degree_structure(year, party): degree_structure = pd.DataFrame(columns=["Titul", "Počet"]) degree_categories = {"vyšší" : ".*(prof|doc).*", "doktorský": ".*(Ph\.D\.|DSc|CSc|Dr\.|DrSc|Th\.D*)", "magisterský" : 
".*(Ing|Ing\. arch|MUDr|MDDr|MVDr|MgA|Mgr|JUDr|PhDr|RNDr|PharmDr|ThLic|ThDr|akad\. arch|ak\.mal.|ak\. soch|MSDr|PaedDr|PhMr|RCDr|RSDr|RTDr|ThMgr.*)", "bakalářský" : ".*(Bc|BcA).*", "neakademický" : ".*(DiS).*"} df = candidates.copy() df = df[df["Rok"] == year] if party != 'Všechny': df = df[df["Název"] == party] for category, regex in degree_categories.items(): df_cat = df.copy() df_cat = df_cat[df_cat["Tituly"].str.match(regex)] df = df[df["Tituly"].str.match(regex) == False] degree_structure = degree_structure.append({"Titul" : category, "Počet" : df_cat.shape[0]}, ignore_index=True) degree_structure = degree_structure.append({"Titul" : "bez titulu", "Počet" : df.shape[0]}, ignore_index=True) ax = degree_structure.plot.bar(x="Titul", y="Počet", legend=False, color="dodgerblue", figsize=(8, 4)) ax.set_ylabel("Počet") interactive(plot_degree_structure, year=dd6, party=dd7)
01/homework_01_B191.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload import swm import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt def consistent_fig(): return plt.figure(figsize=(10, 10)) # - # # Section 2 # $$\begin{aligned} # C(X_r, X_s) =& \frac{\sqrt{\frac{1}{n}\sum_{i=1}^n\left[x_r(i) - x_s(i)\right]^2}}{\sqrt{\frac{1}{n}\sum_{i=1}^nx_r(i)^2} + \sqrt{\frac{1}{n}\sum_{i=1}^nx_s(i)^2}} \\ # =& \frac{\sqrt{\phantom{\frac{1}{n}}\sum_{i=1}^n\left[x_r(i) - x_s(i)\right]^2}}{\sqrt{\phantom{\frac{1}{n}}\sum_{i=1}^nx_r(i)^2} + \sqrt{\phantom{\frac{1}{n}}\sum_{i=1}^nx_s(i)^2}} # \end{aligned}$$ # ## Benchmark problem # Two parallel plots have the same trend. But, they end up showing maximally inconsistent by TIC. # + consistent_fig() n = 100 x_r = np.ones(n) x_s_hflip = -x_r tic = swm.tic(x_r, x_s_hflip) plt.plot(x_r, label='$X_r$', color='red', lw=1) plt.plot(x_s_hflip, label='$X_s$', color='blue', lw=1) plt.title(f"TIC={tic}") sns.despine() plt.legend(); # - # This applies generally -- TIC can't adapt to translations. And, worse than that, the variance adjustments in the denominator make it so translations of the same magnitude but different signs have different scores. 
# + consistent_fig() np.random.seed(100) n = 11 c = 1 x_r = np.sqrt(np.arange(n)) plt.plot(x_r, label='$x_r$', color='red', lw=1) for marker, translation in zip("^xov", [4, 2, -2, -4]): x_s = x_r + translation tic = swm.tic(x_r, x_s) plt.plot(x_s, label='$x_{s_1}$ ' + f'(TIC={tic:0.3f})', color='blue', marker=marker, lw=1) sns.despine() plt.legend(); # - # ## Trend Problem # Even if the trends are maximally inconsistent, TIC can't pick it up, showing a (relatively) better score, # + consistent_fig() np.random.seed(100) n = 100 c = 1 x_r = np.arange(3)+1 x_s = -(x_r - 4) tic = swm.tic(x_r, x_s) plt.plot(x_r, label='$x_{r}$', marker='o', color='red', lw=1) plt.plot(x_s, label='$x_{s}$', marker='o', color='blue', lw=1) plt.ylim(0, 4) sns.despine() plt.title(f"TIC = {tic:0.3f}") plt.legend(); # + consistent_fig() np.random.seed(100) n = 100 c = 1 x_r_prime = np.diff(x_r) x_s_prime = np.diff(x_s) tic = swm.tic(x_r_prime, x_s_prime) plt.plot(x_r_prime, label='$x_{r}$', marker='o', color='red', lw=1) plt.plot(x_s_prime, label='$x_{s}$', marker='o', color='blue', lw=1) sns.despine() plt.title(f"TIC = {tic:0.3f}") plt.legend(); # - # # Simulation Model Validation Method # The metric they derived (2) # $$C(X_r, X_s) = # \left\{ # \begin{array}{ll} # \frac{\sqrt{\sum_{i=1}^n \left[x_r(i) - x_s(i)\right]^2}}{\sqrt{\sum_{i=1}^n x_r(i)^2}} & \mbox{if } \sqrt{\sum_{i=1}^n x_r(i)^2} \ne 0 \\ # \sqrt{\sum_{i=1}^n x_s(i)^2} & \mbox{if } \sqrt{\sum_{i=1}^n x_r(i)^2} = 0 # \end{array} # \right.$$ # confuses me a little. If the sum of squares for $x_r$ is zero as in the lower equation's condition, then it's just a line at 0. I'm not sure why you would want to compare the simulation output to what is essentially no action. (Maybe some robotics domain knowledge that I'm missing?) # Also, the $\xi$ is essentially acting as a temperature parameter in, # # $$D(X_r, X_s) = e^{-\xi C(X_r, X_s)}$$ # # to squash the new metric to $(0, 1)$. 
They say it is, # # > set by domain expert according to the purpose of simulation application # # so it's not as useful as TIC in that now there is a hyper-parameter to select. It's also a bit annoying in that their squashing function flipped the interpretability: Now close to 1 is a good match; before it mean maximally different. # ### Minor error # There are multiple errors in their equation (4) and (5). They have, # # $$\begin{aligned} # X'_r &= \langle x'_r(2), x'_r(2), \dots, x'_r(n-1)\rangle = \left\langle \frac{x_r(2) - x_r(1)}{\Delta t} + \frac{x_r(3) - x_r(2)}{\Delta t} + \cdots + \frac{x_r(n) - x_r(n-1)}{\Delta t} \right\rangle \\ # X'_s &= \langle x'_s(2), x'_s(2) \dots, x'_s(n-1)\rangle = \left\langle \frac{x_s(2) - x_s(1)}{\Delta t} + \frac{x_s(3) - x_s(2)}{\Delta t} + \cdots + \frac{x_s(n) - x_s(n-1)}{\Delta t} \right\rangle \\ # \end{aligned}$$ # # it should be, # # $$\begin{aligned} # X'_r &= \langle x'_r(2), x'_r(3), \dots, x'_r(n)\rangle = \left\langle \frac{x_r(2) - x_r(1)}{\Delta t} + \frac{x_r(3) - x_r(2)}{\Delta t} + \cdots + \frac{x_r(n) - x_r(n-1)}{\Delta t} \right\rangle \\ # X'_s &= \langle x'_s(2), x'_s(3), \dots, x'_s(n)\rangle = \left\langle \frac{x_s(2) - x_s(1)}{\Delta t} + \frac{x_s(3) - x_s(2)}{\Delta t} + \cdots + \frac{x_s(n) - x_s(n-1)}{\Delta t} \right\rangle \\ # \end{aligned}$$ # So basically, take the rate of change for each observation in the reference and simulation, use that for the "trends" series, too. 
# ## Integration of position conformity and trend conformity # This section assumes a set of simulation outputs, # # $$O_s = \{X_{s1}, X_{s2}, \dots, X_{sp} \}$$ # # which they use to construct a matrix, # # $$A = \begin{bmatrix} # a_{11} & a_{12} \\ # a_{12} & a_{22} \\ # \vdots & \vdots \\ # a_{p1} & a_{p2} \\ # \end{bmatrix}$$ # # with, # # $$\begin{aligned} # a_{i1} &= D(X_r, X_{si}) \\ # a_{i1} &= T(X_r, X_{si}) # \end{aligned}$$ # So the first column is the improved and squashed TIC on position and the second is on trend. They then translate A into S by, # # $$s_{ij} = \frac{a_{ij}}{\bar{a}_{\mathbf{\bullet} j}} $$ # # where $$a_{\mathbf{\bullet} j} = \sum_{k=1}^p \frac{a_{kj}}{p}$$ # # So, both columns are now in column-mean units. # # $$S = \begin{bmatrix} # s_{11} & s_{12} \\ # s_{12} & s_{22} \\ # \vdots & \vdots \\ # s_{p1} & s_{p2} \\ # \end{bmatrix}$$ # # **Here's where the presentation gets a little weird for me.** # They define, # # $$y = k_1s_1 + k_2s_2$$ # # which you can use to select the best simulation. It's a linear combination of the squashed/improved TIC metric on the position and trend, respectively. They further define, # # $$k_1^2 + k_2^2 = 1$$ # # so that $k$, the weights, represent a unit-vector. That would be a requirement of PCA. (I'm not sure why it would be mean centered (via division) versus mean-shifted so it was zero). # # But, here's the thing -- this whole section just derives PCA. See Replication section below. # # Application # They give the expected A matrix via Table 2 and 3. expected_D_col = np.array([0.729, 0.733, 0.663, 0.813, 0.840, 0.724, 0.69, 0.555, 0.8]) expected_T_col = np.array([1, 0.05, 1, 0.517, 0.730, 0.652, 0.368, .185, .192]) expected_A = np.vstack([expected_D_col, expected_T_col]).T expected_A.T # But they don't give the data used for Fig 3. I think this is *close* to correct. 
# + t = np.arange(21) x_sqrt = np.sqrt(t) X_r = x_sqrt two_spikes = x_sqrt.copy() + 0.7 two_spikes[6] = 5.7 two_spikes[11] = 5.7 simulations = [] simulations.append(('a', x_sqrt+1)) simulations.append(('b', two_spikes)) simulations.append(('c', x_sqrt+1.2)) simulations.append(('d', t * 5/20)) simulations.append(('e', t**(1/3)+1)) simulations.append(('f', t**(1/4)+2)) simulations.append(('g', 2 + np.ones_like(x_sqrt))) simulations.append(('e', 2 + -t**(1/8)+1)) simulations.append(('f', x_sqrt + np.sin(2*np.pi * t/20 * 2.25))) fig = plt.figure(figsize=(15, 15)) sns.set_style('whitegrid') shared_ax = None i = 1 for l, X_s in simulations: if i == 1: ax = fig.add_subplot(3, 3, i) shared_ax = ax plt.xlim((0, 20)) plt.ylim((0, 6)) else: ax = fig.add_subplot(3, 3, i, sharex=ax, sharey=ax) plt.plot(t, X_r, marker='o', color='red', lw=1) plt.plot(t, X_s, marker='d', color='blue', lw=1) plt.title("x") sns.despine() i += 1 # - # The also don't give their $\xi$. Using the deduced data set, it seems like $\xi=1$, # + consistent_fig() ξ_space = np.linspace(0.01, 2, 2001) scores = [] for ξ in ξ_space: A = swm.make_matrix_A(x_sqrt, [x_s for _, x_s in simulations], ξ) ϵ = np.abs(expected_A - A).sum() scores.append(ϵ) plt.plot(ξ_space, scores) plt.xlabel(r'$\xi$') plt.ylabel('Absolute error compared to paper matrix A'); # - # ## Replication # The "paydirt" of all of this is supposed to be equation (18) which they have as, # # $$y = 0.0223s_1 + 0.9998s_2$$ # What is this? It the projection of matrix $S$ onto the first principle component of S. I'm not sure why they felt compelled to derive the whole thing. The only difference is how they center it. # # The most obvious way to show this? Using the data from above, S = swm.make_matrix_S(x_sqrt, [x_s for _, x_s in simulations], ξ) S # Here is the PCA according to sklearn, # + from sklearn.decomposition import PCA # Create a PCA estimator. 
# (We only care about the 1st principal component)
pca = PCA(1)

# Fit it on S
pca.fit(S)

# Recover the principal components.
# These are the k_1 and k_2 in their paper.
k_ = pca.components_[0]
k_
# -

# That's it.

# Or, "by hand,"

# +
# Get covariance matrix as per (12)
V = np.cov(S.T)

# Get eigen values and vectors
# That's what they get to in Equation (16)
λ, k = np.linalg.eig(V)

# Get the biggest eigenvalue
i = np.argmax(λ)

# Select the associated eigenvector.
# BUGFIX: np.linalg.eig returns eigenvectors as *columns* in no
# particular order, so the column must be picked with the argmax index
# computed above -- the original hard-coded k[:, 1], which only matches
# when the largest eigenvalue happens to land in position 1.
k = k[:, i]
k
# -

# This is different only in sign. But, it's an eigenvector, so it's a unit vector,

(k**2).sum()

# and as such, it's only the vector that matters. If both of the weights are negative, you can negate them, it doesn't change the mathematical object in this context.

# +
consistent_fig()
plt.scatter([0], [0], marker=10)
plt.quiver(*k, scale=3, lw=1, color='red')
plt.quiver(*k_, scale=3, lw=1, color='blue');

# +
consistent_fig()
ax = plt.gca()
ax.scatter(S[:, 0], S[:, 1]);
for i, label in enumerate(c for c, _ in simulations):
    ax.annotate(label, S[i, :] + np.array([0.01, 0.01]))
# -

y = S @ k_
y

# +
consistent_fig()
ax = plt.gca()
y = S @ k_
ax.scatter(y, y);
for i, label in enumerate(c for c, _ in simulations):
    ax.annotate(label, np.array([y[i], y[i]]) + np.array([0.0, -0.1]))
# -

df = pd.DataFrame({'y': y}, index=list('abcdefghi'))
df['result'] = df['y'].rank(ascending=False)
df.T

# Or, using the replicated package,

y = swm.compute_y(x_sqrt, [x_s for _, x_s in simulations], ξ)
y

y.argmax()  # Best model by row index in S
thiel_app/.ipynb_checkpoints/clean_replication-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sequence to sequence # # > **訳注**:このnotebookは,Udacityの[Deep learning nanodegreeの公開教材](https://github.com/udacity/deep-learning)を和訳するプロジェクトの一環として,[Character Sequence to Sequence ](https://github.com/haltaro/deep-learning-in-japanese/blob/master/seq2seq/sequence_to_sequence_implementation.ipynb)を翻訳したものです(**Exercise**は翻訳対象外です).本プロジェクトのnotebook一覧は[こちら](https://haltaro.github.io/deep-learning-in-japanese/). # # # このノートブックでは,入力された文字列をソートして出力するモデルを,Sequence to Sequenceで実装します.このノートブックはTensorflow 1.1に対応しており,[Dave Currieの記事](https://medium.com/towards-data-science/text-summarization-with-amazon-reviews-41801c2210b)を参考にしています. # # <img src="images/sequence-to-sequence.jpg"/> # # # ## Dataset # # データセットは`/data/`ディレクトリにあります.以下のファイルが格納されています. # # * **letters_source.txt**: 入力文字列のリスト.一行に一つの文字列が存在します. # * **letters_target.txt**: 目標文字列のリスト.各行は,`letters_source.txt`と対応しています. # + import numpy as np import time import helper source_path = 'data/letters_source.txt' target_path = 'data/letters_target.txt' source_sentences = helper.load_data(source_path) target_sentences = helper.load_data(target_path) # - import warnings warnings.filterwarnings('ignore') # まず,データセットの中身を確認してみましょう.`source_sentences`は,`\n`で区切られた全ての入力文字列を保持しています. source_sentences[:50].split('\n') # `target_senteces`は,`\n`で区切られた,全ての目標文字列を保持しています.文字列の並び順は,`source_sentences`と対応しています. target_sentences[:50].split('\n') # ## Preprocess # # 文字列を,文字リストに変換する必要があります. # # <img src="images/source_and_target_arrays.png"/> # # それから,`vocab_to_int`に従い文字を整数値に変換します. 
# +
def extract_character_vocab(data):
    """Build int->char and char->int lookup tables for *data*.

    The four special tokens come first so that ``<PAD>`` is always id 0.
    The remaining characters are sorted before enumeration so the
    mapping is deterministic across runs -- iterating a bare ``set``
    would give a hash-randomized, non-reproducible ordering, breaking
    checkpoint reuse between sessions.
    """
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']

    set_words = sorted(set(character for line in data.split('\n') for character in line))
    int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + set_words)}
    vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}

    return int_to_vocab, vocab_to_int


# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)

# Convert characters to ids; unseen characters fall back to <UNK>, and
# every target sequence is terminated with <EOS>.
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<UNK>'])
                      for letter in line]
                     for line in source_sentences.split('\n')]
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>'])
                      for letter in line] + [target_letter_to_int['<EOS>']]
                     for line in target_sentences.split('\n')]

print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
# -

# これで準備は完了です.

# ## Model
#
# ### Tensorflowのバージョン確認
#
# Tensorflowのバージョンを確認します.
# + from distutils.version import LooseVersion import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # - # ### Hyperparameters # Number of Epochs epochs = 60 # Batch Size batch_size = 128 # RNN Size rnn_size = 50 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 15 decoding_embedding_size = 15 # Learning Rate learning_rate = 0.001 # ### Input def get_model_inputs(): input_data = tf.placeholder(tf.int32, [None, None], name='input') targets = tf.placeholder(tf.int32, [None, None], name='targets') lr = tf.placeholder(tf.float32, name='learning_rate') target_sequence_length = tf.placeholder( tf.int32, (None,), name='target_sequence_length') max_target_sequence_length = tf.reduce_max( target_sequence_length, name='max_target_len') source_sequence_length = tf.placeholder( tf.int32, (None,), name='source_sequence_length') return input_data, targets, lr, target_sequence_length, \ max_target_sequence_length, source_sequence_length # ### Sequence to Sequence Model # # seq2seqモデルを構築するための関数定義を始めます.以下のようにボトムアップで各要素を構築します. # # 2.1 Encoder # - Embedding # - Encoder cell # 2.2 Decoder # 1- Process decoder inputs # 2- Set up the decoder # - Embedding # - Decoder cell # - Dense output layer # - Training decoder # - Inference decoder # 2.3 Seq2seq model connecting the encoder and decoder # 2.4 Build the training graph hooking up the model with the # optimizer # ### 2.1 Encoder # # モデルの最初の要素はEncoderです.この関数では,入力を埋め込み,Encoderを構築し,埋め込まれたデータをEncoderに渡します. # # - [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence)を使って入力データを埋め込みます. # # <img src="images/embed_sequence.png" /> # # - RNNスタックに埋め込まれたデータを渡します.RNNの状態を保存し,出力を蟲師ます. 
# # <img src="images/encoder.png" /> def encoding_layer(input_data, rnn_size, num_layers, source_sequence_length, source_vocab_size, encoding_embedding_size): # Encoder embedding enc_embed_input = tf.contrib.layers.embed_sequence( input_data, source_vocab_size, encoding_embedding_size) # RNN cell def make_cell(rnn_size): enc_cell = tf.contrib.rnn.LSTMCell( rnn_size, initializer=tf.random_uniform_initializer( -0.1, 0.1, seed=2)) return enc_cell enc_cell = tf.contrib.rnn.MultiRNNCell( [make_cell(rnn_size) for _ in range(num_layers)]) enc_output, enc_state = tf.nn.dynamic_rnn( enc_cell, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32) return enc_output, enc_state # ## 2.2 Decoder # # Decoderは,このモデルでおそらく最もややこしい部分です.構築には以下の手順が必要です. # # 1- Decoderへの入力を処理. # 2- Decoderの各要素を構築. # - Embedding # - Decoder cell # - Dense output layer # - Training decoder # - Inference decoder # # ### Decoderへの入力を処理 # # 学習プロセスにおいて,目標文字列は二つの使われ方をします. # # 1. 誤差計算. # 2. モデルをロバストにするためのDecoderの入力. # # 二点目について詳細に説明します.ここでは,以下のような目標データを考えます(実際はこの時点で整数値のはずですが,簡単のため文字として表現しています). # # <img src="images/targets_1.png"/> # # Decoderに入力する前に,このテンソルを加工する必要があります. # # 1- 各タイムステップにおいて,この文字列中の要素をDecoderに渡します.最後のタイムステップを考えてください.Decoderは最後にどの要素を出力するのでしょう?そのステップの入力は,目標文字列の最後から二番目の要素です.最後の要素は,Decoderへの入力として使われることはありません.よって,最後の要素を削除する必要があります. # # [`tf.strided_slice()`](https://www.tensorflow.org/api_docs/python/tf/strided_slice)メソッドを使って,これを行います.このメソッドに,テンソル,開始インデックス,および終了インデックスを引数として渡します. # # > **訳注**:以下の4つめの引数(`[1, 1]`)は`stride`で,この場合は特に指定する必要はありません. # # <img src="images/strided_slice_1.png"/> # # 2- 各文字列の最初の要素は`<GO>`シンボルのはずです.`<GO>`シンボルを先頭に追加します. # # <img src="images/targets_add_go.png"/> # # Decoderに渡す準備ができました.テンソルは以下のような見た目のはずです(もし整数値を文字列に変換したなら). 
# # <img src="images/targets_after_processing_1.png"/> # Process the input we'll feed to the decoder def process_decoder_input(target_data, vocab_to_int, batch_size): '''Remove the last word id from each batch and concat the <GO> to the begining of each batch''' ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1) return dec_input # ### Decoderの各要素を構築 # # - Embedding # - Decoder cell # - Dense output layer # - Training decoder # - Inference decoder # # #### 1- Embedding # # Decoderの学習用の入力の準備は整いました.次は,入力値にEmbedding処理を行います. # # 下図のようなEmbedding行列を`tf.nn.embedding_lookup`で構築します. # # <img src="images/embeddings.png" /> # # #### 2- Decoder Cell # # 次に,Decoder cellを構築します.Encoderと同様,`tf.contrib.rnn.LSTMCell`を使います. # # 学習用のDecoderか,推論(Inference,prediction)用のDecoderか宣言する必要があります.これらのDecoderは内部パラメータを共有します.つまり(当たり前ですが)学習時に獲得したWeightとBiasは,推論時にも利用されます. # # まず,Decoder RNNで利用するタイプを定義します.ここではLSTMを選択しました. # # #### 3- Dense output layer # # Decoderを宣言する前に,出力層を構築する必要があります.出力層は`tensorflow.python.layers.Dense`で,各タイムステップで,Decoderの出力値をLogitsに変換します. # # #### 4- Training decoder # # 基本方針として,内部パラメータを共有する二つのDecoderを構築します.一つは学習用で,一つは推論用です.両方とも`tf.contrib.seq2seq.BasicDecoder`と`tf.contrib.seq2seq.dynamic_decode`を使います.しかし,学習用Decoderには目標文字列を入力する点が,推論用Decoderと違います.これはロバストなモデルにするための処理です. # # 学習用のDecoderは,例えば下図のようなものと考えられます(ただし,下図では文字ではなく単語を単位として扱っています). # # <img src="images/sequence-to-sequence-training-decoder.png"/> # # 学習用Decoderは各タイムステップの出力を次の入力として利用**しません**.その代わり,目標データセット(オレンジ色)を入力として用います. # # #### 5- Inference decoder # # 推論用Deocoderは,実際に推論に使うDecoderです. # # <img src="images/sequence-to-sequence-inference-decoder.png"/> # # 学習用Decoderと推論用Decoderのどちらに対しても,EncoderのHidden stateを渡します.Tensorflowはここまでのロジックのほとんどの機能を提供してくれます.我々は,`tf.contrib.seq2seq`から適切なメソッドを選択し,それらを適切な入力に適用するだけです. 
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size, target_sequence_length, max_target_sequence_length, enc_state, dec_input): # 1. Decoder Embedding target_vocab_size = len(target_letter_to_int) dec_embeddings = tf.Variable(tf.random_uniform( [target_vocab_size, decoding_embedding_size])) dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input) # 2. Construct the decoder cell def make_cell(rnn_size): dec_cell = tf.contrib.rnn.LSTMCell( rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)) return dec_cell dec_cell = tf.contrib.rnn.MultiRNNCell( [make_cell(rnn_size) for _ in range(num_layers)]) # 3. Dense layer to translate the decoder's output at each time # step into a choice from the target vocabulary output_layer = Dense( target_vocab_size, kernel_initializer = tf.truncated_normal_initializer( mean = 0.0, stddev=0.1)) # 4. Set up a training decoder and an inference decoder # Training Decoder with tf.variable_scope("decode"): # Helper for the training process. Used by BasicDecoder to read inputs. training_helper = tf.contrib.seq2seq.TrainingHelper( inputs=dec_embed_input, sequence_length=target_sequence_length, time_major=False) # Basic decoder training_decoder = tf.contrib.seq2seq.BasicDecoder( dec_cell, training_helper, enc_state, output_layer) # Perform dynamic decoding using the decoder training_decoder_output, _, __ = tf.contrib.seq2seq.dynamic_decode( training_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) # 5. Inference Decoder # Reuses the same parameters trained by the training process with tf.variable_scope("decode", reuse=True): start_tokens = tf.tile( tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens') # Helper for the inference process. 
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( dec_embeddings, start_tokens, target_letter_to_int['<EOS>']) # Basic decoder inference_decoder = tf.contrib.seq2seq.BasicDecoder( dec_cell, inference_helper, enc_state, output_layer) # Perform dynamic decoding using the decoder inference_decoder_output, _, __ = tf.contrib.seq2seq.dynamic_decode( inference_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) return training_decoder_output, inference_decoder_output # > **訳注**:Tensorflow 1.1では二つだった`tf.contrib.seq2seq.dynamic_decode`の返り値が,現在(1.6)では三つになっていたため,`training_decoder_output, _, __`および`inference_decoder_output, _, __`と修正しました. # ## 2.3 Seq2seq model # # EncoderとDecoderを繋ぎあわせてみましょう. # + def seq2seq_model(input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers): # Pass the input data through the encoder. We'll ignore the encoder output, # but use the state _, enc_state = encoding_layer(input_data, rnn_size, num_layers, source_sequence_length, source_vocab_size, encoding_embedding_size) # Prepare the target sequences we'll feed to the decoder in training mode dec_input = process_decoder_input(targets, target_letter_to_int, batch_size) # Pass encoder state and decoder inputs to the decoders training_decoder_output, inference_decoder_output = decoding_layer( target_letter_to_int, decoding_embedding_size, num_layers, rnn_size, target_sequence_length, max_target_sequence_length, enc_state, dec_input) return training_decoder_output, inference_decoder_output # - # モデルは下図のような`rnn_output`Logitsを含む`training_decoder_output`と`inference_decoder_output`を出力します. # # <img src="images/logits.png"/> # # 学習用Decoderから得られたLogitsを`tf.contrib.seq2seq.sequence_loss()`に渡して,損失関数と勾配を計算します. 
# Build the graph train_graph = tf.Graph() # Set the graph to default to ensure that it is ready for training with train_graph.as_default(): # Load the model inputs input_data, targets, lr, target_sequence_length, \ max_target_sequence_length, source_sequence_length = get_model_inputs() # Create the training and inference logits training_decoder_output, inference_decoder_output = seq2seq_model( input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length, len(source_letter_to_int), len(target_letter_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers) # Create tensors for the training logits and inference logits training_logits = tf.identity(training_decoder_output.rnn_output, 'logits') inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions') # Create the weights for sequence_loss masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) # ## バッチ生成 # # バッチを生成する際,必要な処理が少しだけあります.以下は,`batch_size = 2`のときの簡単な例です.入力文字列は以下のように処理します(実際は整数値です.簡単のため文字列を表示しています): # # <img src="images/source_batch.png" /> # # 目標文字列は以下のように処理します: # # <img src="images/target_batch.png" /> def pad_sentence_batch(sentence_batch, pad_int): """Pad sentences with <PAD> so that each sentence of a batch has the same length""" max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch] def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int): 
"""Batch targets, sources, and the lengths of their sentences together""" for batch_i in range(0, len(sources)//batch_size): start_i = batch_i * batch_size sources_batch = sources[start_i:start_i + batch_size] targets_batch = targets[start_i:start_i + batch_size] pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int)) pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int)) # Need the lengths for the _lengths parameters pad_targets_lengths = [] for target in pad_targets_batch: pad_targets_lengths.append(len(target)) pad_source_lengths = [] for source in pad_sources_batch: pad_source_lengths.append(len(source)) yield pad_targets_batch, pad_sources_batch, pad_targets_lengths, \ pad_source_lengths # ## 学習 # # モデルを学習させる準備が整いました.もしメモリ不足の問題が発生した場合は,`batch_size`を小さくしてみてください. # > **訳注**:`checkpoint = "best_model.ckpt" `だとエラーが出るので,[こちら](https://stackoverflow.com/questions/42134360/tensorflow-valueerror-parent-directory-of-trained-variables-ckpt-doesnt-exis)を参考に`checkpoint = "./best_model.ckpt"`と修正しました. 
# + # Split data to training and validation sets train_source = source_letter_ids[batch_size:] train_target = target_letter_ids[batch_size:] valid_source = source_letter_ids[:batch_size] valid_target = target_letter_ids[:batch_size] (valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next( get_batches(valid_target, valid_source, batch_size,source_letter_to_int['<PAD>'], target_letter_to_int['<PAD>'])) display_step = 20 # Check training loss after every 20 batches checkpoint = "./best_model.ckpt" with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(1, epochs+1): for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths)\ in enumerate(get_batches( train_target, train_source, batch_size, source_letter_to_int['<PAD>'],target_letter_to_int['<PAD>'])): # Training step _, loss = sess.run( [train_op, cost], {input_data: sources_batch, targets: targets_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths}) # Debug message updating us on the status of the training if batch_i % display_step == 0 and batch_i > 0: # Calculate validation cost validation_loss = sess.run( [cost], {input_data: valid_sources_batch, targets: valid_targets_batch, lr: learning_rate, target_sequence_length: valid_targets_lengths, source_sequence_length: valid_sources_lengths}) print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}' .format(epoch_i, epochs, batch_i, len(train_source) // batch_size, loss, validation_loss[0])) # Save Model saver = tf.train.Saver() saver.save(sess, checkpoint) print('Model Trained and Saved') # - # ## Prediction def source_to_seq(text): '''Prepare the text for the model''' sequence_length = 7 return [source_letter_to_int.get(word, source_letter_to_int['<UNK>']) for word in text]+ [source_letter_to_int['<PAD>']]*( sequence_length-len(text)) # + input_sentence = 'hello' text = 
source_to_seq(input_sentence) checkpoint = "./best_model.ckpt" loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(checkpoint + '.meta') loader.restore(sess, checkpoint) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') #Multiply by batch_size to match the model's input parameters answer_logits = sess.run(logits, {input_data: [text]*batch_size, target_sequence_length: [len(text)]*batch_size, source_sequence_length: [len(text)]*batch_size})[0] pad = source_letter_to_int["<PAD>"] print('Original Text:', input_sentence) print('\nSource') print(' Word Ids: {}'.format([i for i in text])) print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text]))) print('\nTarget') print(' Word Ids: {}'.format([i for i in answer_logits if i != pad])) print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad])))
seq2seq/sequence_to_sequence_implementation_j.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np #importing the libraries import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import pandas_profiling df = pd.read_csv('phishing.txt') # loading the dataset df.head() # seeing the top 5 rows elements df.shape df.columns = [ 'UsingIP', 'LongURL', 'ShortURL', 'Symbol@', 'Redirecting//', 'PrefixSuffix-', 'SubDomains', 'HTTPS', 'DomainRegLen', 'Favicon', 'NonStdPort', 'HTTPSDomainURL', 'RequestURL', 'AnchorURL', 'LinksInScriptTags', 'ServerFormHandler', 'InfoEmail', 'AbnormalURL', 'WebsiteForwarding', 'StatusBarCust', 'DisableRightClick', 'UsingPopupWindow', 'IframeRedirection', 'AgeofDomain', 'DNSRecording', 'WebsiteTraffic', 'PageRank', 'GoogleIndex', 'LinksPointingToPage', 'StatsReport', 'class' ] df.head() df.info() df.isnull().sum() # finding whether the given dataset consists of null values or not df.columns df['LongURL'].value_counts() for i in df.columns : # finding how many unique values are there in each columns print(df[i].value_counts()) df.head() x = df.drop('class',axis=1) # dropping the target column in the dataset y = df['class'] # creating the target individually from the given dataset from sklearn.model_selection import train_test_split # splitting the dataset to train and test x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=18) x_train.shape,x_test.shape,y_train.shape,y_test.shape from sklearn.linear_model import LogisticRegression # importing Logistic regression logreg = LogisticRegression(C=100) logreg.fit(x_train,y_train) # fitting the train and test data logreg.score(x_train,y_train) # train score logreg.score(x_test,y_test) # test score y_pred = logreg.predict(x_test) # predting the values from the x_test data df_pred = pd.DataFrame({'y_pred':y_pred, 
'y_test':y_test}) df_pred from sklearn.metrics import accuracy_score accuracy_score(y_test,y_pred) # performance of the model df_pred # + jupyter={"outputs_hidden": true} list_y_test = list(y_test) list_y_test # + jupyter={"outputs_hidden": true} list_y_pred = list(y_pred) list_y_pred # + # count of misclassified samples in the test data prediction count= 0 for i in range(len(list_y_test)) : if list_y_test[i] != list_y_pred[i]: count=count+1 print(count) # + # Exercise 2 # - x1 = df[['PrefixSuffix-','AnchorURL']] x1.ndim y1 = df['class'] y1.value_counts() from sklearn.model_selection import train_test_split x_train1,x_test1,y_train1,y_test1 = train_test_split(x1,y1,test_size=0.3,random_state=9) x_train1.shape,x_test1.shape,y_train1.shape,y_test1.shape from sklearn.linear_model import LogisticRegression lgrg = LogisticRegression(C=100) lgrg.fit(x_train1,y_train1) lgrg.score(x_train1,y_train1) # train score lgrg.score(x_test1,y_test1) # test score y_pred1 = lgrg.predict(x_test1) # predicting the values from x_test data df_pred1 = pd.DataFrame({'y_test1':y_test1, 'y_pred1':y_pred1}) # + jupyter={"outputs_hidden": true} df_pred1 # - accuracy_score(y_test1,y_pred1) # performance of the model lgrg.coef_ lgrg.intercept_ # + xx, yy = np.mgrid[-5:5:.01, -5:5:.01] grid = np.c_[xx.ravel(), yy.ravel()] probs = lgrg.predict_proba(grid)[:, 1].reshape(xx.shape) #print(probs) f, ax = plt.subplots(figsize=(20,10)) contour = ax.contourf(xx, yy, probs, 25, cmap="rainbow", vmin=0, vmax=1) ax_c = f.colorbar(contour) ax_c.set_label("$P(y = 1)$") ax_c.set_ticks([0, .25, .5, .75, 1]) ax.scatter(x_test.iloc[:, 0], x_test.iloc[:, 1],c = (y_test == 15 ), s=50, cmap="rainbow", vmin=-.2, vmax=1.2, edgecolor="white", linewidth=1) """ax.set(aspect="equal", xlim=(-5, 5), ylim=(-5, 5), xlabel="$X_1$", ylabel="$X_2$") """ plt.show() # -
phisherdata.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import ray
import pyarrow as pa

ray.init(address="127.0.0.1:6379")


def cast(t: pa.Table) -> pa.Table:
    """Return *t* with the "review_body" column retyped to large_string.

    large_string is Arrow's variable-length UTF-8 type with 64-bit offsets;
    presumably used here because review bodies can exceed what 32-bit
    offset string chunks hold — confirm against the dataset.
    """
    idx = t.schema.get_field_index("review_body")
    widened_field = t.schema.field(idx).with_type(pa.large_string())
    return t.cast(t.schema.set(idx, widened_field))


# Each Parquet block is passed through `cast` as it is read.
ds = ray.data.read_parquet("s3://dsoaws/parquet/", _block_udf=cast)

ds.groupby("product_category").count().show()
# -
wip/ray/datasets/parquet-raydata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dyjdlopez/adu-pld/blob/main/lab-notes/pld_05.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="DlNAdryisosq" # # Arithmetic and Logical Operators # --- # $_{\text{© Project Alexandria | 2021 | Programming Logic and Design}}$ # + id="9gxvzeVCuHzi" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="H-45NtK5_FmC" # The core of software development lies in the well-structured arithmetic and logical flow of a program. Famous websites and apps such as Facebook, Google, Uber, and Netflix all rely on arithemtic and logic to maintain their services. In your journey in Software Development math and logic would never be out of the equation when you wish develop great apps. In this notebook, we will be tackling basic arithmetic and logic in Python. # + [markdown] id="ApIXsqWGsxDQ" # # Part 1: Arithmetic Operations # This is the part where most of the people who hate math would attempt to stop or skip. But before you go, let me show you an aopportunity to actually make math easier for you. Using programming, you can automate the equations that would be troublesome to do on paper. 
You can think that the mantra in programming for math is *run once, solve many times*. In this section, we will discuss basic to intermediate operations to help you out in applying it to basic math and some sciences. # + [markdown] id="LNRZE3tGs67h" # ## 1.1 PMDAS in Python # # Let's just go through the basic arithmetic and how you can actually code them. We'll look at the basic arithmetic: # * Addition # * Subtraction # * Multiplication # * Division # # We'll also look at the combining their uses in a single code block. # + id="W6DcbfTx7Plj" outputId="b3f363bf-d2de-46d9-cef0-780a56535fdd" colab={"base_uri": "https://localhost:8080/"} # @title Addition # @markdown Just simply put plus "+" sign between your numbers or variables num1 = 5 num2 = 7 total = num1+num2 print("{} plus {} is {}".format(num1,num2,total)) # + id="3XpUfWy77hmi" outputId="38a11b2c-7faf-427d-9bd7-2d1a4a45da8b" colab={"base_uri": "https://localhost:8080/"} # @title Subtraction # @markdown Just like addition, just put the minus "-" sign for subtraction. difference = num1-num2 print("{} minus {} is {}".format(num1,num2,difference)) # + id="OJxmAVfh7qVq" outputId="8fe183a5-3f86-4afc-b582-eea09645df54" colab={"base_uri": "https://localhost:8080/"} # @title Multiplication # @markdown To do multiplication just put the asterisk "*" symbol between the variables. product = num1*num2 print("{} times {} is {}".format(num1,num2,product)) # + id="0J43Etu672WL" outputId="a87f9cb9-6105-4993-968f-902660e25cd7" colab={"base_uri": "https://localhost:8080/"} # @title Division # @markdown For division use the forward slash "/" even if you are operating with integers your result would be floats. 
quotient = num1/num2 print("{} divided by{} is {:.2f}".format(num1, num2, quotient)) # + id="HB9hr1lR7_xr" # @markdown You can take this further and combine the arithmetic operations If you prefer knowing the equation in standard format it would actually look like this:$$\text{PMDAS} = \frac{3(num_1+num_2)}{2} + \frac{5}{3}(num_1-num_2) $$ ## this is usually called linear format pmdas = 3*(num1+num2)/2 + (5/3)*(num1-num2) print(pmdas) # + [markdown] id="u7LbLun-Jb30" # # + id="N0bePm4PDaoc" outputId="581a9589-711f-47e3-ffd0-0d847c80f960" colab={"base_uri": "https://localhost:8080/", "height": 262} ## Let's try to take user inputs to compute for its sum and difference f_num = input("Enter the first number:") s_num = input("Enter the second number:") sums = f_num + s_num diffs = f_num - s_num print("{0}+{1}={2}\n{0}-{1}={3}".format(f_num,s_num,sums,diffs)) # + id="hRXqR1YYELp6" # @markdown Oops you have run into an error. The error is telling you that you cannot perform subtraction between two strings! # @markdown The `input()` function does not automatically knows that your input is a number. As programmers you might need to anticipate this and apply a solution. One solution to this is to convert the string input to a number. To do this we will parse the string using the `int()` function. The `int()` function will convert the string to integers. ## Let's do the code again f_num = int(input("Enter the first number: ")) s_num = int(input("Enter the second number: ")) sums = f_num + s_num diffs = f_num - s_num print("{0}+{1}={2}\n{0}-{1}={3}".format(f_num,s_num,sums,diffs)) ## The float() function also exists! You can try this if your input is expecting ## a decimal value # + [markdown] id="43Pd-wfatDKx" # ## 1.2 Intermediate Arithmetic # Now let's try to do some intermediate math with Python. In Python, this would be easier to do compared to other programming languages however computing speed will be relatively slower compared to other languages like C and C++. 
For this section we will just tackle: # * Floor Division # * Modulo # * Exponentiation # + id="f4ENMzFf__fz" outputId="ba167e5d-25e9-42df-d4b2-c60250c689cc" colab={"base_uri": "https://localhost:8080/"} # @title Floor Division # @markdown In comparsion with regular division we are taking its whole number part by automatically rounding it up with the nearest ones place value. However, the output would now be an integer. num3 = 20 num4 = 7 print("Regular division: \n{}/{} = {}".format(num3, num4, num3/num4)) print("Floor division: \n{}/{} = {}".format(num3, num4, num3//num4)) # + id="Z1jVkl9g_b7q" outputId="58ff6414-a420-42e5-90e1-dab7d104309d" colab={"base_uri": "https://localhost:8080/"} # @title Modulo # @markdown The modulo operation does a division operation but only outputs the remainder of the operation. This could be useful print(num3%num4) # + colab={"base_uri": "https://localhost:8080/"} id="dg57Jtq5McXL" outputId="f2edd3e8-fb8c-4a12-e914-7ac9394420f4" # @markdown One good use of the modulo operation is checking for divisibility if the result of the modulo is 0 that means the dividend is divisible to the divisor. dividend = 36 # dividend = 40 divisor = 6 print(dividend % divisor) # + id="q9GX7tZVBGIC" outputId="40596a07-121b-4c2c-f873-3a8b25784e6c" colab={"base_uri": "https://localhost:8080/"} ## Now if you want to do the regular division with remainders you can ## combine the floor division and modulo operation together. whole_num = num3//num4 remainder = num3%num4 print("{}÷{}={}R{}".format(num3, num4, whole_num, remainder)) # + id="wlsnbBg2_ORD" outputId="bcc5c1be-f82a-4dcd-cb99-56bcf7f33011" colab={"base_uri": "https://localhost:8080/"} # @title Exponentiation #@markdown Exponents are prevalent in non-linear tasks such as quadratic behavior and most trends in real-world analytics. Exponentiation is also accessible in Python by making it part of the in-line equation programming. 
base = 10
power = 2
print(base**power) ## instead of using the caret "^" symbol
## we will use double asterisks "**" for exponentiation.

# + id="G_Lt1sAgBzhz" outputId="6af6c24f-4fd4-4696-954a-0de5dbf141a1" colab={"base_uri": "https://localhost:8080/"}
#@markdown If you are into math you would recall that roots of numbers can also be represented as exponents. We can use that logic in programming as well.
squared = 25
sq_root = 25**(1/2)
print("The square root of {} is {}".format(squared, sq_root))

# + id="UDgZxpIy9wsq"
# @title Equation Modelling in Python
#@markdown You can use Python in coding mathematics! We can try to transcribe equations quickly. Take this equation for example $$F = G\frac{m_1m_2}{r^2}$$ If you have done some physics before, this is the Universal law of gravitation where:

#@markdown * $F$ is the force between two objects;
#@markdown * $G$ is the universal gravitational constant;
#@markdown * $m_1$ is the mass of the first object;
#@markdown * $m_2$ is the mass of the second object; and
#@markdown * $r$ is the distance between the two objects

## BUG FIX: the constant was written as 6.67e10-11, which Python parses as
## (6.67e10) - 11 ≈ 6.67e10 — 21 orders of magnitude too large. The
## gravitational constant is 6.67e-11 N·m²/kg² (CODATA ≈ 6.674e-11).
G = 6.67e-11
m1 = 5.972e24   ## mass of the Earth, kg
m2 = 7.348e22   ## mass of the Moon, kg
r = 3.84e8      ## Earth–Moon distance, m

F = G*(m1*m2)/r**2
print("The force is {:.3e} N." .format(F))
## We used .3e instead of .3f since we want to use
## scientific notation.
## You can try .3f is you want to see the difference!

# + [markdown] id="uSzBWt4jtH0p"
# ## 1.3 Arithmetic Assignment
# In advanced cases, you might need to do compound operations with your variables. A common compound operation is recurrent arithmetic. This is done by adding a value to a variable and re-storing that in the same variable. This could be observed in some tasks such as:
# * Increments
# * Decrements
# * Exponential Growth
# * Exponential Decay

# + id="3EHWEPtrFJAh"
# @title Increments
# @markdown To start increment routines, we need to identify two main items. First is the base value or the starting number. The second is the increment value.
inc_total = 0 ## starting value inc_const = 1 # + id="CVQUqEixFhWD" #@markdown This is the usual form that people might think when operating with a variable and then storing it back as a new number. In standard notation it would look like: $$ var_{new} = var_{old} + C$$ Where $var$ is just any variable and $C$ would be any constant. inc_total = inc_total + inc_const print(inc_total) # + id="A6I3j1E2FlyK" #@markdown However, we can simplify this in programming by using the arithmetic assignment. But first, we need to re-initialize our variables so they won't add up with the previous incremental sum. inc_const = 1 inc_total = 0 # + id="M4RuHPGUFgPy" inc_total += inc_const ## we can just use the "+=" operator to automatically assign the values. print(inc_total) # + id="GRhtW5_tFwnK" # @title Decrement # @markdown Just like increments, we need to declare the starting value for a decrement routine as well as the decrement constant. dec_const = 1 dec_total = 100 # + id="ZnUliNTeF3P5" dec_total -= dec_const ## just like in increments, you can just use "-=" as a decrement operator # + id="dpP_Lzt5GGGy" #@title Exponential growth/decay #@markdown Increments or decrements are linear in nature, it means that if you try to plot the values from an increment/decrement routine it will look like a line. In some cases, non-linear or exponential growth/decay may happen. This done when you continuously multiply or divide a constant to a variable. exp_const = 2.0 exp_growth = 1.0 exp_decay = 1024.00 # + id="F7ck6f4FGzJy" exp_growth *= exp_const exp_decay /= exp_const print(exp_growth) print(exp_decay) # + [markdown] id="7zAZ_w0vtwex" # # Part 2: Comparison Operations # Comparison operators or comparators are also vital in the logical design of programs. These operations compare values whether they are: # * Equal # * Identical # * Greater than # * Less than # * Not equal # # The output of any logical operation such as with comparators would be Truth Values or Boolean. 
Meaning, the results could only be either True or False. # + [markdown] id="JSJRNCjlt0jy" # ## 2.1 Equalities and Identities # It is essential to check whether values are equal to each other especially in validation routines in programming. However, equality and indentity are not the same in programmming. **Equality** means that values are just the same. While **identity** means that values and data types are also the same. # + id="Hge_Bk5PHQdq" ## Let's take these values as samples value_1 = 3 value_2 = 2 value_3 = 3.0 # + id="1Kbzt1HqHYhM" outputId="9213b5e6-96ae-45b4-935f-d0a116bcf796" colab={"base_uri": "https://localhost:8080/"} ## Equality print(value_1 == value_2) ## this would obviously not be equal print(value_1 == value_3) ## these would be equal in value # + id="QVBLOIN6JZsR" outputId="1fa77e35-5643-40d2-a450-5991d6f8d758" colab={"base_uri": "https://localhost:8080/"} ## Identity print(value_1 is value_2) print(value_1 is value_3) ## this will now be False since their data types are different # + colab={"base_uri": "https://localhost:8080/"} id="bwk_FdH2cOfG" outputId="6f0babb8-2dd4-4476-dcee-2ad7ca13d169" ## Try your own values out! x = 1 # @param y = "1" # @param print(x==y) # + [markdown] id="mS78YGaxt30R" # ## 2.2 Inequalities # Inquality operators span a large concept. It is not only being a match but also determining range. Inequalities could include: # * Non-equality # * Greater than # * Less than # + id="gD5TqyzfLEK5" outputId="9375ebc1-1489-4b39-ba6c-df461958cfb4" colab={"base_uri": "https://localhost:8080/"} # @title Non-equality # @markdown Non-equality is a basic comparison of not being equal to each other. It can be represented using the "!=" comparator. The exclamation point in programming is generally accepted as a symbol of negation. So in a sense it is read as "not equal". 
print(value_1 != value_2) print(value_1 != value_3) # + id="ARbqYbASKi7z" outputId="87a34e38-c4f6-4327-8b42-782e5537c154" colab={"base_uri": "https://localhost:8080/"} # @title Greater than # @markdown Extending inequalites, the greater than comparator checks whether the left hand variable is greater than the right-hand variable. print(value_1 > value_2) print(value_1 > value_3) # + id="QrWaRxwpK4XJ" outputId="55ab2ee3-cac1-4629-a6d1-026c9c9d9310" colab={"base_uri": "https://localhost:8080/"} # @title Greater than or Equal # @markdown If you wish to include the right-hand variable in the range of comparison just add the equals symbol to make it ">=". This means that the left-hand variable is greater than or equal to the right-hand variable. In math, you might see this as the $\geq$ symbol. print(value_1 >= value_2) print(value_1 >= value_3) # + id="JH1OM5WLKq4r" outputId="24aee658-8b68-45a5-a937-90f90ef90e41" colab={"base_uri": "https://localhost:8080/"} # @title Less Than # @markdown The less than "<" comparator checks whether left-hand variable is smaller the right-hand variable. print(value_1 < value_2) print(value_2 < value_3) # + id="jru6Umq3K-kz" outputId="08a33365-530f-4a9a-e062-7b0c49624491" colab={"base_uri": "https://localhost:8080/"} # @title Less than or Equal # @markdown If you wish to include the right-hand variable in the range of comparison just add the equals symbol to make it "<=". This means that the left-hand variable is less than or equal to the right-hand variable. In math, you might see this as the $\leq$ symbol. print(value_1 <= value_2) print(value_2 <= value_3) # + [markdown] id="CoOMzywLs4QJ" # # Part 3: Logical Operations # We now move towards to structuring the logic of the program. Here, we will use logical operations to compare different logical statements and comparisons. These are also call propositional logic operations but we'll just call them logical operations for brevity. 
# + [markdown] id="mleZe2XOtRox"
# ## 3.1 Negation (NOT Operation)
# The negation operation just reverses the logic of the boolean values. Whatever that is `False` would be `True` and what is `True` to be `False`.

# + id="hE6xD3ptLxHq" outputId="45a9cc61-d42c-43b6-e97f-57f6c3e301ac" colab={"base_uri": "https://localhost:8080/"}
## BUG FIX: bool1 and bool2 were used without ever being defined, so this
## cell raised a NameError. Define sample boolean values first.
bool1 = True
bool2 = False

## Just use the not() function to negate the values.
print(not(bool1))
print(not(bool2))

# + id="8nZ3K3W4L3QE" outputId="05449d71-c8a1-4c4a-8b0e-848916e42649" colab={"base_uri": "https://localhost:8080/"}
## Let's look an example where we would use comparators and logical operations together
passing = 50
my_score = 75

is_pass = my_score >= passing

## BUG FIX: the format arguments were swapped (passing, my_score), which
## printed "Is score 50 ... the passing 75" — the score is 75, not 50.
print("Is score {} greater or equal than the passing {}?\nPass: {}\n".format(my_score, passing, is_pass))
print("Is score {} less than the passing {}?\nFail: {}\n".format(my_score, passing, not(is_pass)))

# + [markdown] id="0TB5VXcjteB5"
# ## 3.2 Conjunction (AND Operation)
# The conjunction operation or simply the **and** operation requires a minimum of two boolean values to start the comparison. The **and** operation would only result to `True` when all of the boolean values to compare is also `True`.

# + id="vPBH4WTLM2di" outputId="94d7c004-709e-4a1b-ce2a-c0943c27e5de" colab={"base_uri": "https://localhost:8080/"}
## Here is what we call a Truth Table.
## x and y are the inputs for a 2-value comparison
print("| x\t| y\t| x AND y")
print("| {}\t| {}\t| {}".format(True, True, True and True))
print("| {}\t| {}\t| {}".format(True, False, True and False))
print("| {}\t| {}\t| {}".format(False, True, False and True))
print("| {}\t| {}\t| {}".format(False, False, False and False))

# + [markdown] id="YoJ0fUIFkktP"
# **Let's have an example**
#
# You were tasked to create a piece of code that would validate the order of a customer. There are three (3) choices of Set Meals: A, B, and C. Then there are four (4) choices of drinks: 1, 2, 3, and 4. The customer ordered Set Meal B **and** drink option 1.
# + id="85lE1DrAQ1G6" outputId="04991a16-1520-4319-8e3f-6a6c090cc4e5" colab={"base_uri": "https://localhost:8080/"} ordered_meal = 'B' ordered_drinks = '1' delivered_meal = 'B' delivered_drinks = '2' purchase = (ordered_meal == delivered_meal) and (ordered_drinks == delivered_drinks) print("The purchase was successful: {}".format(purchase)) # + [markdown] id="krQckcqhtgu0" # ## 3.3 Disjunction (OR Operation) # The disjuction operation or simply the **or** operation requires a minimum of two boolean values to start the comparison. The **or** operation would result to `True` when any of the boolean values to compare is `True`. # + id="P_123tCQsYIs" outputId="927a542a-3189-43d1-ed77-76ec05f2313d" colab={"base_uri": "https://localhost:8080/"} ## Here is the truth table for an OR operation print("| x\t| y\t| x OR y") print("| {}\t| {}\t| {}".format(True, True, True or True)) print("| {}\t| {}\t| {}".format(True, False, True or False)) print("| {}\t| {}\t| {}".format(False, True, False or True)) print("| {}\t| {}\t| {}".format(False, False, False or False)) # + [markdown] id="h6qZ2rzklh7Y" # **Let's have an example** # # You were tasked to create a piece of code that would validate the suggestion of a movie recommender. The users would mention their preferences for their movies, such as the movie's duration and genre. Customers would usually like suggestions from **any** of their choices. # # + id="27d4uXC8R6wT" outputId="01ff1776-bc44-40ff-b32e-ed82487a52b5" colab={"base_uri": "https://localhost:8080/"} pref_time = "short" pref_genre = "horror" recom_time = "long" recom_genre = "horror" match = (pref_time == recom_time) or (pref_genre == recom_genre) print("Was the recommendation useful? {}".format(match)) # + [markdown] id="oxxT8czquCPR" # # Summary # # In this notebook, we discussed about the different fundamental arithmetic and logical operations in Python. 
We learned that arithmetic operations are primarily designed for numbers such as: # * Addition # * Subtraction # * Multiplication # * Division # * Floor Division # * Modulo # * Exponentiation # * Increments/Decrements # * Exponential Growth # # We also learned further about logical operations like comparators. We used comparators to check for equalities, identities, and inequalities. And lastly, we talked about logical operations such as the NOT, AND, and OR operations. # + [markdown] id="3PNot65yuDHC" # # Additional Readings # # 1. [Arithmetic Operators](https://www.w3schools.com/python/gloss_python_arithmetic_operators.asp) # 2. [Assignment Operators](https://www.w3schools.com/python/gloss_python_assignment_operators.asp) # 3. [Comparison Operators](https://www.w3schools.com/python/gloss_python_comparison_operators.asp) # 4. [Logical Operations](https://www.pythontutorial.net/python-basics/python-logical-operators/)
lab-notes/pld_05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="u8KXftfxYoHG" # # Node2vec, Edge2vec, and Graph2vec # + [markdown] id="iWjahuWDq9xH" # The first, and most straightforward, way of creating features capable of representing structural information from graphs is the extraction of certain statistics. For instance, a graph could be represented by its degree distribution, efficiency, and other metrics. # # A more complex procedure consists of applying specific kernel functions or, in other cases, engineering-specific features that are capable of incorporating the desired properties into the final machine learning model. However, as you can imagine, this process could be really time-consuming and, in certain cases, the features used in the model could represent just a subset of the information that is really needed to get the best performance for the final model. # # In the last decade, a lot of work has been done in order to define new approaches for creating meaningful and compact representations of graphs. The general idea behind all these approaches is to create algorithms capable of *learning* a good representation of the original dataset such that geometric relationships in the new space reflect the structure of the original graph. 
We usually call the process of learning a good representation of a given graph **representation learning** or **network embedding.** # + id="_eb3mirfeY5i" # !pip install node2vec # !pip install karateclub # !pip install python-Levenshtein # !pip install gensim==3.8.0 # + id="62BCFXzAkTu-" import random import networkx as nx from node2vec import Node2Vec import matplotlib.pyplot as plt from karateclub import Graph2Vec from node2vec.edges import HadamardEmbedder # + id="lCqYt9cwjwin" def draw_graph(G, pos_nodes, node_names={}, node_size=50, plot_weight=False): nx.draw(G, pos_nodes, with_labels=False, node_size=node_size, edge_color='gray', arrowsize=30) pos_attrs = {} for node, coords in pos_nodes.items(): pos_attrs[node] = (coords[0], coords[1] + 0.08) nx.draw_networkx_labels(G, pos_attrs, font_family='serif', font_size=20) if plot_weight: pos_attrs = {} for node, coords in pos_nodes.items(): pos_attrs[node] = (coords[0], coords[1] + 0.08) nx.draw_networkx_labels(G, pos_attrs, font_family='serif', font_size=20) edge_labels=dict([((a,b,),d["weight"]) for a,b,d in G.edges(data=True)]) nx.draw_networkx_edge_labels(G, pos_nodes, edge_labels=edge_labels) plt.axis('off') axis = plt.gca() axis.set_xlim([1.2*x for x in axis.get_xlim()]) axis.set_ylim([1.2*y for y in axis.get_ylim()]) # + [markdown] id="hKKQLnJ3jwi5" # ## Graph2Vec Example # + [markdown] id="4xUq6bfUrFT9" # Given a dataset with *m* different graphs, the task is to build a machine learning algorithm capable of classifying a graph into the right class. We can then see this problem as a classification problem, where the dataset is defined by a list of pairs, $<G_i,y_i>$, where $G_i$ is a graph and $y_i$ is the class the graph belongs to. # # Representation learning (network embedding) is the task that aims to learn a mapping function $f:G \to \mathbb{R}^n$, from a discrete graph to a continuous domain. 
# Function $f$ will be capable of performing a low-dimensional vector representation such that the properties (local and global) of graph $G$ are preserved.

# + id="fDocRPY5jwi7" colab={"base_uri": "https://localhost:8080/", "height": 598} outputId="6b79a670-1f56-4607-f54e-2c474f470aa5"
n_graphs = 20


def generate_random():
    """Return a random Watts-Strogatz small-world graph and its parameters.

    Draws n (number of nodes), k (ring neighbours) and p (rewiring
    probability) at random, then returns the tuple (graph, [n, k, p]).
    NOTE: renamed from the misspelled `generate_radom`.
    """
    n = random.randint(6, 20)
    k = random.randint(5, n)  # k must not exceed n for watts_strogatz_graph
    p = random.uniform(0, 1)
    return nx.watts_strogatz_graph(n, k, p), [n, k, p]


# Build the dataset of random graphs (parameters kept for reference).
Gs = [generate_random() for _ in range(n_graphs)]

# Fit Graph2Vec on the graphs only (first element of each tuple) and embed
# every graph into a 2-D vector so it can be plotted directly.
model = Graph2Vec(dimensions=2, wl_iterations=10)
model.fit([g for g, _params in Gs])
embeddings = model.get_embedding()

fig, ax = plt.subplots(figsize=(10, 10))
for i, vec in enumerate(embeddings):
    ax.scatter(vec[0], vec[1], s=1000)
    ax.annotate(str(i), (vec[0], vec[1]), fontsize=40)

# + [markdown] id="517u4dbGjwiq"
# ## Node2Vec example

# + [markdown] id="pTmSVt79rVou"
# Given a (possibly large) graph $G = (V,E)$, the goal is to classify each vertex $v \in V$ into the right class. In this setting, the dataset includes $G$ and a list of pairs, $<v_i,y_i>$, where $v_i$ is a node of graph $G$ and $y_i$ is the class to which the node belongs. In this case, the mapping function would be $f:V \to \mathbb{R}^n$.
# + id="eO7bEiTMjwis" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="c7854190-d4ac-4849-cf77-f26e903bac0a" G = nx.barbell_graph(m1=7, m2=4) draw_graph(G, nx.spring_layout(G)) # + colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["4873a694c93046e8a911ff3788c9ee20", "7c922cae76e1458a8e8cff91f9408ae2", "9d5c32f7b94b45698c8ecbec7f8dfbec", "2ca39fc95d3149d9989c7eaf685f0fd3", "<KEY>", "<KEY>", "<KEY>", "00f4bc36a0d94ba88e2df3227c08478c"]} id="nAwL7XyQpBRk" outputId="7d79dd64-45d0-4945-bd07-b8eb16f7af21" node2vec = Node2Vec(G, dimensions=2) model = node2vec.fit(window=10) # + id="bIvLaJmEjwiw" colab={"base_uri": "https://localhost:8080/", "height": 592} outputId="fd76fb17-f3c6-41b6-8040-095c3bfe54dc" fig, ax = plt.subplots(figsize=(10,10)) for x in G.nodes(): v = model.wv.get_vector(str(x)) ax.scatter(v[0],v[1], s=1000) ax.annotate(str(x), (v[0],v[1]), fontsize=12) # + [markdown] id="vRtI11rrjwi0" # ## Edge2Vec example # + [markdown] id="2yflt-XQrfu8" # Given a (possibly large) graph $G = (V,E)$, the goal is to classify each edge $e \in E$, into the right class. In this setting, the dataset includes $G$ and a list of pairs, $<e_i,y_i>$, where $e_i$ is an edge of graph $G$ and $y_i$ is the class to which the edge belongs. Another typical task for this level of granularity is **link prediction**, the problem of predicting the existence of a link between two existing nodes in a graph. In this case, the mapping function would be $f:E \to \mathbb{R}^n$. # + id="MSQp-gVvjwi1" edges_embs = HadamardEmbedder(keyed_vectors=model.wv) # + id="EPcCgb9xjwi2" colab={"base_uri": "https://localhost:8080/", "height": 592} outputId="acf0e540-ee4a-4a56-c024-8fee3a20f80c" fig, ax = plt.subplots(figsize=(10,10)) for x in G.edges(): v = edges_embs[(str(x[0]), str(x[1]))] ax.scatter(v[0],v[1], s=1000) ax.annotate(str(x), (v[0],v[1]), fontsize=16)
docs/T926278_Node2vec_Edge2vec_and_Graph2vec.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Deep learning for computer vision # # # This notebook will teach you to build and train convolutional networks for image recognition. Brace yourselves. # # CIFAR dataset # This week, we shall focus on the image recognition problem on cifar10 dataset # * 60k images of shape 3x32x32 # * 10 different classes: planes, dogs, cats, trucks, etc. # # <img src="cifar10.jpg" style="width:80%"> # + import numpy as np from cifar import load_cifar10 X_train,y_train,X_val,y_val,X_test,y_test = load_cifar10("cifar_data") class_names = np.array(['airplane','automobile ','bird ','cat ','deer ','dog ','frog ','horse ','ship ','truck']) print (X_train.shape,y_train.shape) # + import matplotlib.pyplot as plt # %matplotlib inline plt.figure(figsize=[12,10]) for i in range(12): plt.subplot(3,4,i+1) plt.xlabel(class_names[y_train[i]]) plt.imshow(np.transpose(X_train[i],[1,2,0])) # - # # Building a network # # Simple neural networks with layers applied on top of one another can be implemented as `torch.nn.Sequential` - just add a list of pre-built modules and let it train. 
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class Flatten(nn.Module):
    """Reshape a [batch, channel, w, h] tensor to [batch, units].

    Keeps the batch dimension and collapses everything else, so the output
    of the convolutional stack can feed a dense layer.
    """

    def forward(self, x):
        # `x` used to be named `input`, which shadowed the builtin.
        return x.view(x.size(0), -1)
# -

# Let's start with a dense network for our baseline:

# +
model = nn.Sequential()

# reshape from "images" to flat vectors
model.add_module('flatten', Flatten())

# dense "head"
model.add_module('dense1', nn.Linear(3 * 32 * 32, 64))
model.add_module('dense1_relu', nn.ReLU())
model.add_module('dense2_logits', nn.Linear(64, 10))  # logits for 10 classes
# -

# As in our basic tutorial, we train our model with negative log-likelihood aka crossentropy.


def compute_loss(X_batch, y_batch):
    """Return the mean cross-entropy loss of `model` on one minibatch.

    X_batch / y_batch are numpy arrays; they are wrapped into torch
    Variables (old, pre-0.4 PyTorch API used throughout this notebook).
    """
    X_batch = Variable(torch.FloatTensor(X_batch))
    y_batch = Variable(torch.LongTensor(y_batch))
    logits = model(X_batch)
    return F.cross_entropy(logits, y_batch).mean()


# example
compute_loss(X_train[:5], y_train[:5])

# ### Training on minibatches
# * We got 40k images, that's way too many for a full-batch SGD.
Let's train on minibatches instead # * Below is a function that splits the training sample into minibatches # An auxilary function that returns mini-batches for neural network training def iterate_minibatches(X, y, batchsize): indices = np.random.permutation(np.arange(len(X))) for start in range(0, len(indices), batchsize): ix = indices[start: start + batchsize] yield X[ix], y[ix] # + opt = torch.optim.SGD(model.parameters(), lr=0.01) train_loss = [] val_accuracy = [] # + import time num_epochs = 100 # total amount of full passes over training data batch_size = 50 # number of samples processed in one SGD iteration for epoch in range(num_epochs): # In each epoch, we do a full pass over the training data: start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for X_batch, y_batch in iterate_minibatches(X_train, y_train, batch_size): # train on batch loss = compute_loss(X_batch, y_batch) loss.backward() opt.step() opt.zero_grad() train_loss.append(loss.data.numpy()[0]) # And a full pass over the validation data: model.train(False) # disable dropout / use averages for batch_norm for X_batch, y_batch in iterate_minibatches(X_val, y_val, batch_size): logits = model(Variable(torch.FloatTensor(X_batch))) y_pred = logits.max(1)[1].data.numpy() val_accuracy.append(np.mean(y_batch == y_pred)) # Then we print the results for this epoch: print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() - start_time)) print(" training loss (in-iteration): \t{:.6f}".format( np.mean(train_loss[-len(X_train) // batch_size :]))) print(" validation accuracy: \t\t\t{:.2f} %".format( np.mean(val_accuracy[-len(X_val) // batch_size :]) * 100)) # - # Don't wait for full 100 epochs. You can interrupt training after 5-20 epochs once validation accuracy stops going up. 
# ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ### Final test # + model.train(False) # disable dropout / use averages for batch_norm test_batch_acc = [] for X_batch, y_batch in iterate_minibatches(X_test, y_test, 500): logits = model(Variable(torch.FloatTensor(X_batch))) y_pred = logits.max(1)[1].data.numpy() test_batch_acc.append(np.mean(y_batch == y_pred)) test_accuracy = np.mean(test_batch_acc) print("Final results:") print(" test accuracy:\t\t{:.2f} %".format( test_accuracy * 100)) if test_accuracy * 100 > 95: print("Double-check, than consider applying for NIPS'17. SRSly.") elif test_accuracy * 100 > 90: print("U'r freakin' amazin'!") elif test_accuracy * 100 > 80: print("Achievement unlocked: 110lvl Warlock!") elif test_accuracy * 100 > 70: print("Achievement unlocked: 80lvl Warlock!") elif test_accuracy * 100 > 60: print("Achievement unlocked: 70lvl Warlock!") elif test_accuracy * 100 > 50: print("Achievement unlocked: 60lvl Warlock!") else: print("We need more magic! Follow instructons below") # - # ## Task I: small convolution net # ### First step # # Let's create a mini-convolutional network with roughly such architecture: # * Input layer # * 3x3 convolution with 10 filters and _ReLU_ activation # * 2x2 pooling (or set previous convolution stride to 3) # * Flatten # * Dense layer with 100 neurons and _ReLU_ activation # * 10% dropout # * Output dense layer. # # # __Convolutional layers__ in torch are just like all other layers, but with a specific set of parameters: # # __`...`__ # # __`model.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3)) # convolution`__ # # __`model.add_module('pool1', nn.MaxPool2d(2)) # max pooling 2x2`__ # # __`...`__ # # # Once you're done (and compute_loss no longer raises errors), train it with __Adam__ optimizer with default params (feel free to modify the code above). # # If everything is right, you should get at least __50%__ validation accuracy. 
# ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # __Hint:__ If you don't want to compute shapes by hand, just plug in any shape (e.g. 1 unit) and run compute_loss. You will see something like this: # # __`RuntimeError: size mismatch, m1: [5 x 1960], m2: [1 x 64] at /some/long/path/to/torch/operation`__ # # See the __1960__ there? That's your actual input shape. # # ## Task 2: adding normalization # # * Add batch norm (with default params) between convolution and ReLU # * nn.BatchNorm*d (1d for dense, 2d for conv) # * usually better to put them after linear/conv but before nonlinearity # * Re-train the network with the same optimizer, it should get at least 60% validation accuracy at peak. # # # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # ## Task 3: Data Augmentation # # There's a powerful torch tool for image preprocessing useful to do data preprocessing and augmentation. # # Here's how it works: we define a pipeline that # * makes random crops of data (augmentation) # * randomly flips image horizontally (augmentation) # * then normalizes it (preprocessing) # + from torchvision import transforms means = np.array((0.4914, 0.4822, 0.4465)) stds = np.array((0.2023, 0.1994, 0.2010)) transform_augment = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomRotation([-30, 30]), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(means, stds), ]) # + from torchvision.datasets import CIFAR10 train_loader = CIFAR10("./cifar_data/", train=True, transform=transform_augment) train_batch_gen = torch.utils.data.DataLoader(train_loader, batch_size=32, shuffle=True, num_workers=1) # + for (x_batch, y_batch) in train_batch_gen: print('X:', type(x_batch), x_batch.shape) print('y:', type(y_batch), y_batch.shape) for i, img in enumerate(x_batch.numpy()[:8]): plt.subplot(2, 4, i+1) plt.imshow(img.transpose([1,2,0]) * stds + means ) raise 
NotImplementedError("Plese use this code in your training loop") # TODO use this in your training loop # - # When testing, we don't need random crops, just normalize with same statistics. # + transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(means, stds), ]) test_loader = <YOUR CODE> # - # ## The Quest For A Better Network # # See `practical_dl/homework02` for a full-scale assignment.
week03_convnets/seminar_pytorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import numpy as np import matplotlib matplotlib.use('Agg',warn=False) import matplotlib.pyplot as plt # %matplotlib inline import warnings warnings.filterwarnings('ignore') from IPython.display import Markdown as md # + fdir = './outs_test' modeldir = './pretrained_model/' ae_dir = './wae_metric/pretrained_model/' batch_size = 100 if not os.path.exists(fdir): os.makedirs(fdir) if not os.path.exists(modeldir): os.makedirs(modeldir) # + from wae_metric.run_WAE import LATENT_SPACE_DIM, load_dataset #these utilities are loaded from the autoencoder scripts to keep them consistent jag_inp, jag_sca, jag_img = load_dataset('./data/') LATENT_DIM = LATENT_SPACE_DIM # - print('---------------Dataset Information---------------\nInput parameters: {}, Output Scalars: {}, Output Images: {}'.format(jag_inp.shape, jag_sca.shape, jag_img.shape)) # ## Create Test Train Splits np.random.seed(4321) #this is the random seed used during training. 
tr_id = np.random.choice(jag_sca.shape[0],int(jag_sca.shape[0]*0.8),replace=False) print(tr_id[:10]) te_id = list(set(range(jag_sca.shape[0])) - set(tr_id)) # + X_train = jag_inp[tr_id,:] y_sca_train = jag_sca[tr_id,:] y_img_train = jag_img[tr_id,:] np.random.shuffle(te_id) X_test = jag_inp[te_id,:] y_sca_test = jag_sca[te_id,:] y_img_test = jag_img[te_id,:] y_img_test_mb = y_img_test[-100:,:] y_img_test_mb = y_img_test_mb.reshape(100,64,64,4) # - # ## Save Ground Truth Images in "fdir" # + from utils import plot for k in range(4): fig = plot(y_img_test_mb[:,:,:,k],immax=np.max(y_img_test_mb[:,:,:,k].reshape(-1,4096),axis=1), immin=np.min(y_img_test_mb[:,:,:,k].reshape(-1,4096),axis=1)) plt.savefig('{}/gt_img_{}_{}.png' .format(fdir,str(k).zfill(3),str(k)), bbox_inches='tight') plt.close() # - dim_x = X_train.shape[1] dim_y_sca = y_sca_train.shape[1] dim_y_img = y_img_train.shape[1] dim_y_img_latent = LATENT_DIM #latent space # ## Build the Computational Graph # # + from modelsv2 import cycModel_MM import wae_metric.model_AVB as wae import tensorflow as tf tf.reset_default_graph() y_sca = tf.placeholder(tf.float32, shape=[None, dim_y_sca]) y_img = tf.placeholder(tf.float32, shape=[None, dim_y_img]) x = tf.placeholder(tf.float32, shape=[None, dim_x]) train_mode = tf.placeholder(tf.bool,name='train_mode') y_mm = tf.concat([y_img,y_sca],axis=1) ### 1. Map outputs (images, scalars) --> latent space with pre-trained autoencoder y_latent_img = wae.gen_encoder_FCN(y_mm, dim_y_img_latent,train_mode) ### 2. Next, build the CycleGAN that learns to map input params <--> latent vector cycGAN_params = {'input_params':x, 'outputs':y_latent_img, 'param_dim':dim_x, 'output_dim':dim_y_img_latent, 'L_adv':1e-2, # controls "physical" consistency 'L_cyc':1e-1, # controls cyclical consustency 'L_rec':1.} # controls fidelity of surrogate JagNet_MM = cycModel_MM(**cycGAN_params) JagNet_MM.run(train_mode) ### 3. 
Decode the predictions from the CycleGAN into output space of images and scalars y_img_out = wae.var_decoder_FCN(JagNet_MM.output_fake, dim_y_img+dim_y_sca,train_mode) # + t_vars = tf.global_variables() m_vars = [var for var in t_vars if 'wae' in var.name] metric_saver = tf.train.Saver(m_vars) saver = tf.train.Saver(list(set(t_vars)-set(m_vars))) sess = tf.Session() sess.run(tf.global_variables_initializer()) ckpt = tf.train.get_checkpoint_state(modeldir) ckpt_metric = tf.train.get_checkpoint_state(ae_dir) if ckpt_metric and ckpt_metric.model_checkpoint_path: metric_saver.restore(sess, ckpt_metric.model_checkpoint_path) print("************ Image Metric Restored! **************") if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) print("************ Model restored! **************") # - # ## Train the network # + from utils import test_imgs_plot for it in range(50000): randid = np.random.choice(X_train.shape[0],batch_size,replace=False) x_mb = X_train[randid,:] y_img_mb = y_img_train[randid,:] y_sca_mb = y_sca_train[randid,:] fd = {x: x_mb, y_sca: y_sca_mb,y_img:y_img_mb,train_mode:True} _, dloss,gloss0,gloss1 = sess.run([JagNet_MM.D_solver,JagNet_MM.loss_disc, JagNet_MM.loss_gen0,JagNet_MM.loss_gen1], feed_dict=fd) _ = sess.run([JagNet_MM.G0_solver],feed_dict=fd) if it%100 == 0: print('Iter: {}; forward loss: {:.4}; inverse loss: {:.4}' .format(it, gloss0, gloss1)) if it%500==0: nTest = 16 x_test_mb = X_test[-nTest:,:] samples,samples_x = sess.run([y_img_out,JagNet_MM.input_cyc], feed_dict={x: x_test_mb,train_mode:False}) data_dict= {} data_dict['samples'] = samples data_dict['samples_x'] = samples_x data_dict['y_sca'] = y_sca_test data_dict['y_img'] = y_img_test data_dict['x'] = x_test_mb test_imgs_plot(fdir,it,data_dict) # -
cycGAN_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (spk) # language: python # name: spk # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#count-distinct" data-toc-modified-id="count-distinct-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>count distinct</a></span></li><li><span><a href="#Get-distinct-column-values" data-toc-modified-id="Get-distinct-column-values-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Get distinct column values</a></span></li><li><span><a href="#Group-by-one-column-and-filter-rows-with-maximum" data-toc-modified-id="Group-by-one-column-and-filter-rows-with-maximum-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Group by one column and filter rows with maximum</a></span><ul class="toc-item"><li><span><a href="#using-leftsemi-join" data-toc-modified-id="using-leftsemi-join-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>using leftsemi join</a></span></li><li><span><a href="#using-sql" data-toc-modified-id="using-sql-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>using sql</a></span></li><li><span><a href="#using-pandas" data-toc-modified-id="using-pandas-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>using pandas</a></span></li></ul></li><li><span><a href="#Get-average-after-filtering" data-toc-modified-id="Get-average-after-filtering-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Get average after filtering</a></span></li></ul></div> # + import numpy as np import pandas as pd import pyspark from pyspark import SparkConf, SparkContext, SQLContext from pyspark.sql import SparkSession from pyspark.sql.functions import udf # @udf("integer") def myfunc(x,y): return x - y from pyspark.sql import functions as F # stddev format_number date_format, dayofyear, when from pyspark.sql.types import 
StructField, StringType, IntegerType, StructType print([(x.__name__,x.__version__) for x in [np, pd, pyspark]]) spark = pyspark.sql.SparkSession.builder.appName('example').getOrCreate() sc = spark.sparkContext sqlContext = SQLContext(sc) sc.setLogLevel("INFO") # - # # count distinct # + data = [("2001","id1"),("2002","id1"), ("2002","id1"),("2001","id1"), ("2001","id2"),("2001","id2"), ("2002","id2"),("2003","id1")] df = spark.createDataFrame(data,["year","id"]) df.show() # - # Note that countDistinct does not count Null as a distinct value! df.groupBy("year").agg(F.countDistinct("id").alias('count')).show() df.count() # total number of rows df.distinct().count() df.select(F.countDistinct('year')).show() dfp = pd.DataFrame(data,columns=['year','id']) dfp dfp.groupby(['year'])['id'].count().reset_index() dfp.groupby('year').agg({'id': 'count'}) # # Get distinct column values x = df.select('year').distinct() x.show() x.collect() x.dropDuplicates(['year']).show() # # Group by one column and filter rows with maximum # + data = [ ('a', 5, 'c'), ('a', 8, 'd'), ('a', 7, 'e'), ('b', 1, 'f'), ('b', 3, 'g') ] df = spark.createDataFrame(data, ['A','B','C']) df.show() # - df.groupBy('A').agg({'B': 'max'}).show() df.groupBy('A').agg(F.max('B')).show() # ## using leftsemi join # + # we also want to get column with C column # - df.join(df.groupBy('A').agg(F.max('B').alias('B')),on='B',how='leftsemi').show() # ## using sql df.createOrReplaceTempView("table") # this is sql temporary view # + q = '''SELECT * FROM table a LEFT SEMI JOIN ( SELECT A, max(B) as max_B FROM table GROUP BY A ) t ON a.A=t.A AND a.B=t.max_B ''' results = spark.sql(q) results.show() # - # ## using pandas dfp = df.toPandas() dfp pd.merge(dfp.groupby('A')['B'].max(), dfp) dfp.groupby('A')['B'].max().to_frame().merge(dfp) dfp.groupby('A',as_index=False)['B'].max().merge(dfp) # # Get average after filtering df.show() df = df.withColumn('age', df.B ) df.show() df.filter(df['B'] > 5).agg({'age': 'avg'}).show() 
df.filter(df['B'] > 5).agg(F.avg(F.col('age'))).show() from pyspark.sql.functions import avg, col, when df.select( avg( when(df['B'] > 5, df['age']) )).show()
Section9_SparkDF_Exercise/pyspark_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <img title="GitHub Octocat" src='./img/Octocat.jpg?raw=true' style='height: 60px; padding-right: 15px' alt="Octocat" align="left"> This notebook is part of a GitHub repository: https://github.com/pessini/insurance-claim-prediction/ # <br>MIT Licensed # <br>Author: <NAME> # # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">Porto Seguro’s Safe Driver Prediction - Kaggle</p> # ## Predict if a driver will file an insurance claim next year # <img src='./img/safe-driving-730x432.jpeg' style='height:400px; border-radius: 5px;'/> # --- # # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">Contents:</p> # # * [1. Introduction](#1) # * [1.1 Data Description](#1.1) # * [1.2 Libraries](#1.2) # * [1.3 Loading Dataset](#1.3) # * [2. Preprocessing & Feature Engineering](#2) # * [2.1 Target variable distribution](#2.1) # * [2.2 Missing values](#2.2) # * [2.3 Exploratory Analysis](#2.3) # * [2.4 Features Importance](#2.4) # * [2.5 Data transformation and normalization](#2.5) # * [3. Models](#3) # * [3.1 LightGBM](#3.1) # * [3.2 XGBoost](#3.2) # * [3.3 Random Search for Hyper-Parameter Optimization](#3.3) # * [3.4 LightGBM Tuned](#3.4) # * [4. Evaluation](#4) # * [4.1 Normalized Gini Coefficient](#4.1) # * [4.2 LightGBM](#4.2) # * [4.3 XGBoost](#4.3) # * [4.4 LightGBM Tuned](#4.4) # * [4.5 ROC AUC](#4.5) # * [5. 
Kaggle Submission](#5) # # <a id="1"></a> # # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">1- Introduction</p> # <img title="Porto Seguro" src="img/porto-seguro-logo-1-3.png" alt="Porto Seguro" style='height:80px; padding: 10px; padding-right: 15px;' align="center"> # # [Porto Seguro](https://www.portoseguro.com.br/en/institutional) is one of the largest insurance companies in Brazil specialized in car and home insurance. Located in São Paulo, Porto Seguro has been one of the leading insurers in Brazil since its foundation in 1945. # # A key challenge faced by all major insurers is, when it comes to car insurance, how to address fairness towards good drivers and try not to penalize those who have a good driving history on account of a few bad drivers. Inaccuracies in car insurance claim predictions usually raise its cost for good drivers and reduce the price for bad ones. # # <NAME> has been applying Machine Learning for more than 20 years and intends to make car insurance more accessible to everyone. # # <img title="<NAME>" src="img/Kaggle_logo.png" alt="<NAME>" style='height:80px; padding: 10px; padding-right: 15px' align="center"> # # [Kaggle](https://www.kaggle.com/) is an online community of data scientists and allows users to find and publish data sets, explore and build ML models, and enter competitions to solve data science challenges. # # In this [competition](https://www.kaggle.com/c/porto-seguro-safe-driver-prediction/), the challenge is to build a model that predicts the probability that a car insurance policyholder will file a claim next year. # <a id="1.1"></a> # ## Data Description # In the train and test data: # # - Features that belong to similar groupings are tagged as such in the feature names (e.g., `ind`, `reg`, `car`, `calc`). # - Feature names include the postfix `bin` to indicate binary features and `cat` to indicate categorical features. 
# - Features __without__ these designations are either __continuous or ordinal__. # - Values of `-1` indicate that the feature was missing from the observation. # - The `target` columns signifies whether or not a claim was filed for that policy holder. # <a id="1.2"></a> # ## Libraries # + import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import matplotlib.pylab as pylab import seaborn as sns import random import json from pathlib import Path # %matplotlib inline matplotlib.style.use("ggplot") # Preprocessing import sklearn from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import train_test_split # Models import lightgbm as lgb from lightgbm import LGBMClassifier import xgboost as xgb from xgboost import XGBClassifier import joblib from numpy import savetxt from numpy import loadtxt # Evaluation from sklearn.metrics import roc_auc_score from sklearn.metrics import auc from sklearn.metrics import roc_curve from numpy import argmax # Hyperparameter optimization from sklearn.model_selection import RandomizedSearchCV import time import warnings warnings.simplefilter('ignore', FutureWarning) # - from platform import python_version print('Python version:', python_version()) # # !pip install -q -U watermark # %reload_ext watermark # %watermark -a "<NAME>" --iversions # <a id="1.3"></a> # ## Loading Dataset # + # Kaggle #path = "../input/porto-seguro-safe-driver-prediction/" # Local path = "./data/" # - # %%time train_df = pd.read_csv(path + "train.csv").set_index('id') test_df = pd.read_csv(path + "test.csv").set_index('id') # --- # As per competition description, there are a few calculated features. In one of the discussions on Kaggle, it was highlighted that some kind of transformation was applied in order to generate these features. 
I will drop them and apply the transformations using my best judgment. train_df = train_df.drop(train_df.filter(regex='_calc').columns, axis=1) test_df = test_df.drop(test_df.filter(regex='_calc').columns, axis=1) print('Train Dataset - Number of rows are',train_df.shape[0], 'and number of columns are ',train_df.shape[1]) print('Test Dataset - Number of rows are',test_df.shape[0], 'and number of columns are ',test_df.shape[1]) # <a id="2"></a> # # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">2- Preprocessing & Feature Engineering</p> # <a id="2.1"></a> # ## Target variable distribution # `target` variable 1 means that a claim was filed and 0 that it was not claimed. # + y = train_df.target fig = plt.figure(figsize=(7, 5)) ax = sns.countplot(y,label="Count") total_size = len(train_df) # Display the target value ratio at the top of the bar for patch in ax.patches: height = patch.get_height() width = patch.get_width() left_coord = patch.get_x() percent = height/total_size*100 ax.text(x=left_coord + width/2.0, y=height + 3000, s='{:1.1f}%'.format(percent), ha='center') ax.set_title('Target Distribution'); plt.savefig('./plots/target_distribution.png', dpi=fig.dpi) # - # --- # The target feature has a severe __imbalance distribution__ showing that only __3.6% filled a claim__ and 96.4% did not. # # This will be handle by the algorithm on a hyperparameter `is_unbalance = True`. # <a id="2.2"></a> # ## Missing values # # Values of __`-1`__ indicate that the feature was missing from the observation. 
# +
# Report every column that uses the -1 sentinel for "missing",
# along with its absolute count and share of the training rows.
vars_with_missing = []
n_rows = train_df.shape[0]

for col in train_df.columns:
    n_missing = (train_df[col] == -1).sum()
    if n_missing > 0:
        vars_with_missing.append(col)
        frac = n_missing / n_rows
        print('Variable {} has {} records ({:.2%}) with missing values'.format(col, n_missing, frac))

print('\nIn total, there are {} variables with missing values'.format(len(vars_with_missing)))
# -

# ---
# Only `ps_car_03_cat` and `ps_car_05_cat` have a large number (~ >= 50%) of missing values.
# - ps_car_03_cat has 411231 records (69.09%)
# - ps_car_05_cat has 266551 records (44.78%)

# <a id="2.1"></a>
# ## Metadata
#
# To make data management easier, a meta-info about the variables is added to the DataFrame. It will help handling those variables later on the analysis, data viz and modeling.
#
# - __level__: categorical, numerical, binary
# - __dtype__: int, float, str
#
# We do not have information on which features are ordinal or not so a meta-info `numerical` will be added in order to apply __Normalization__ later.
# Build a per-column metadata table: each variable is classified by naming
# convention ('_bin' -> binary, '_cat' -> categorical, otherwise numerical)
# so later cells can select columns by level.
data = []
for f in train_df.columns:
    if 'bin' in f or f == 'target':
        level = 'binary'
    elif 'cat' in f:
        level = 'categorical'
    elif train_df[f].dtype == float:
        level = 'numerical'
    elif train_df[f].dtype == int:
        level = 'numerical'
    else:
        # FIX: previously `level` kept its value from the prior iteration
        # (or was unbound on the first one) for any unexpected dtype.
        # All current columns are int/float, so this fallback does not
        # change behavior on this dataset.
        level = 'numerical'

    # Defining the data type
    dtype = train_df[f].dtype

    # Creating a Dict that contains all the metadata for the variable
    f_dict = {
        'varname': f,
        'level': level,
        'dtype': dtype
    }
    data.append(f_dict)

meta = pd.DataFrame(data, columns=['varname', 'level', 'dtype'])
meta.set_index('varname', inplace=True)

# Example to extract all categorical variables that are not dropped
meta[(meta.level == 'categorical')].index

# ### Number of variables per role and level

pd.DataFrame({'count' : meta.groupby(['level'])['level'].size()}).reset_index()

# <a id="2.3"></a>
# ## Exploratory Analysis

numerical_features = meta[(meta.level == 'numerical')].index

fig = plt.figure(figsize=(12, 10))
cont_corr = train_df[numerical_features].corr() # Correlation between continuous features
sns.heatmap(cont_corr, annot=True, cmap='OrRd'); # Plot heatmap
plt.savefig('./plots/heatmap.png', dpi=fig.dpi)

# There are a strong correlations between the variables:
#
# - ps_car_12 and ps_car_13 (0.67)
# - ps_reg_01 and ps_reg_03 (0.64)
# - ps_car_13 and ps_car_15 (0.53)
# - ps_reg_02 and ps_reg_03 (0.52)
#
# Heatmap showed low number of correlated variables, we'll look at three of highly correlated variables separately.

# Convert -1 sentinels in a training-data copy to NaN, then drop those rows.
# FIX: np.NaN was removed in NumPy 2.0; np.nan is the same object on older
# versions, so behavior is unchanged.
train_copy = train_df.copy().replace(-1, np.nan)
train_copy = train_copy.dropna()
s = train_copy.sample(frac=0.2)

# __NOTE__: sampling was applied to speed up the process.
# ### ps_car_12 x ps_car_13 sns.lmplot(x='ps_car_12', y='ps_car_13', data=s, hue='target', palette='Set1', scatter_kws={'alpha':0.3}) plt.savefig('./plots/ps_car_12xps_car_13.png', dpi=fig.dpi) plt.show() # ### ps_reg_01 x ps_reg_03 sns.lmplot(x='ps_reg_01', y='ps_reg_03', data=s, hue='target', palette='Set1', scatter_kws={'alpha':0.3}) plt.savefig('./plots/ps_reg_01xps_reg_03.png', dpi=fig.dpi) plt.show() # ### ps_reg_02 x ps_reg_03 sns.lmplot(x='ps_reg_02', y='ps_reg_03', data=s, hue='target', palette='Set1', scatter_kws={'alpha':0.3}) plt.savefig('./plots/ps_reg_02xps_reg_03.png', dpi=fig.dpi) plt.show() # As the number of correlated variables is rather low, dimensionality reduction will not be applied and the model will do the heavy-lifting. # ## Binary features # __Distribution__ of binary data and the __corresponding__ values of __target__ variable. # + warnings.simplefilter(action='ignore') var = [col for col in train_copy.columns if '_bin' in col] i = 0 s_bin = train_copy.sample(frac=0.1) t1 = s_bin.loc[s_bin['target'] != 0] t0 = s_bin.loc[s_bin['target'] == 0] sns.set_style('whitegrid') plt.figure() fig, ax = plt.subplots(figsize=(15,20)) for feature in var: i += 1 plt.subplot(4,3,i) sns.kdeplot(t1[feature], bw=0.5, label="target = 1") sns.kdeplot(t0[feature], bw=0.5, label="target = 0") plt.legend(loc='best') plt.ylabel('Density plot', fontsize=12) plt.xlabel(feature, fontsize=12) locs, labels = plt.xticks() plt.tick_params(axis='both', which='major', labelsize=12) plt.savefig('./plots/binary-features.png', dpi=fig.dpi) plt.show() # - # <a id="2.4"></a> # ## Features Importance # As the categorical variables are already numerical, there is no need to apply LabelEncoding. # # __Reference__: # ><NAME>., & <NAME>. (2019). Python Machine Learning. Zaltbommel, Netherlands: Van Haren Publishing. 
# +
# Feature scaling: standardize only the numerical columns (zero mean, unit
# variance) on a copy of the training data used for feature importance.
numerical_features = meta[(meta.level == 'numerical')].index
features_n = numerical_features.to_list()
training_normalized = train_df.copy()
features = training_normalized[features_n]
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)
training_normalized[features_n] = features
# -

training_data = training_normalized.copy()

# Parameters for the throwaway classifier used only to rank features.
lgb_params_f = {
    'is_unbalance': True, # because training data is extremely unbalanced
    'objective': 'binary',
    'boosting_type': 'dart',
    'learning_rate': 0.01,
    'n_estimators': 500,
    'n_jobs': 2, # number of parallel threads
    'importance_type': 'gain'
}

features_classifier = lgb.LGBMClassifier()
features_classifier.set_params(**lgb_params_f)

# +
y = training_data.target
X = training_data.drop(['target'], inplace=False, axis=1)

# Hold out 20% so feature-importance training never sees it.
X, X_test, y, y_test = train_test_split(X, y, test_size = 0.2)

# +
kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021)
# NOTE(review): `predicts` is never filled (the append below is commented
# out) — it is kept only to preserve the notebook as run.
predicts = []

for train_index, test_index in kf.split(X, y):
    print('-'*40)
    X_train, X_val = X.iloc[train_index], X.iloc[test_index]
    y_train, y_val = y.iloc[train_index], y.iloc[test_index]
    features_classifier.fit(X_train, y_train, eval_set=[(X_val, y_val)], verbose=250, early_stopping_rounds=50)
    #predicts.append(features_classifier.predict(X_test))

# saving the model
joblib_file = "./files/LightGBM_feature_importances.pkl"
joblib.dump(features_classifier, joblib_file)
# -

# ## Loading prefit model

# NOTE(review): the file is opened in text mode only to pass `f.name` to
# joblib.load; `joblib.load(path)` directly would be equivalent — confirm
# before simplifying.
try:
    with open("./files/LightGBM_feature_importances.pkl") as f:
        features_classifier = joblib.load(f.name)
        X = train_df.drop(['target'], inplace=False, axis=1)
except IOError:
    print("File not accessible")

# +
#Plotting features importance
feature_imp = pd.DataFrame(sorted(zip(features_classifier.feature_importances_,X.columns)), columns=['Value','Feature'])
# Rescale gains to [0, 1] so the bar plot is comparable across runs.
scaler_ft = MinMaxScaler()
feature_imp['Value'] = scaler_ft.fit_transform(feature_imp['Value'].values.reshape(-1,1))

fig = plt.figure(figsize=(15, 12))
sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False))
plt.title('LightGBM Features Importance by avg over folds')
plt.savefig('./plots/lgbm_importances.png', dpi=fig.dpi)
locs, labels = plt.xticks()
plt.tick_params(axis='both', which='major', labelsize=12)
plt.show()
# -

# <a id="2.5"></a>
# # Data transformation and normalization

# ### Combining train and test data

# Now that we have the Feature Importance, let's join the train and test data in order to perform transformation on both.

all_data = pd.concat([train_df, test_df], ignore_index=True)
all_data = all_data.drop('target', axis=1) # Remove target value

# ## Dropping less important features

# +
# Columns ranked lowest by the feature-importance model above.
drop_features = ['ps_car_02_cat', 'ps_ind_14','ps_ind_10_bin', 'ps_ind_11_bin','ps_ind_12_bin','ps_ind_13_bin',
                 'ps_car_08_cat','ps_car_10_cat', 'ps_ind_18_bin', 'ps_car_09_cat', 'ps_car_05_cat']

all_data_remaining = all_data.drop(drop_features, axis=1)

print('Number of features before selection: {}'.format(all_data.shape[1]))
print('Number of features after selection: {}'.format(all_data_remaining.shape[1]))
# -

all_data_feat_sel = all_data_remaining.copy()

# ## Handling missing data

# The study of missing data was formalized by <NAME> with the concept of missing mechanism in which missing-data indicators are random variables and assigned a distribution. Missing data mechanism describes the underlying mechanism that generates missing data.
#
# It is important to consider missing data mechanism when deciding how to deal with missing data. Because this is unknown, I will consider the missing data as part of the dataset (as a category) and just create a new feature adding the total number of missing data.
#
# ><NAME>. (1975). INFERENCE AND MISSING DATA. ETS Research Bulletin Series, 1975(1), i–19.
# https://doi.org/10.1002/j.2333-8504.1975.tb01053.x

feature_names = all_data_feat_sel.columns.tolist()
num_features = [c for c in feature_names if '_cat' not in c]

# New feature: total number of -1 sentinels (missing values) per row.
all_data_feat_sel['missing'] = (all_data_feat_sel==-1).sum(axis=1).astype(float)
num_features.append('missing')

# <a id="2.2"></a>
# ## Feature scaling using StandardScaler

# +
# Scale every non-categorical, non-binary column to zero mean / unit variance.
features_n = [col for col in all_data_feat_sel.columns if ('_cat' not in col and '_bin' not in col)]

all_data_n = all_data_feat_sel.copy()
features = all_data_n[features_n]
# using default
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)
all_data_n[features_n] = features
# -

# <a id="2.3"></a>
# ## One-hot encoding categorical features

# +
all_data_enc = all_data_n.copy()
categoricals_features = [col for col in all_data_feat_sel.columns if '_cat' in col]

print('Before dummification we have {} variables in train'.format(all_data_enc.shape[1]))
all_data_enc = pd.get_dummies(all_data_enc, columns=categoricals_features, drop_first=True)
print('After dummification we have {} variables in train'.format(all_data_enc.shape[1]))
# -

# ## Split train and test data

# +
num_train = train_df.shape[0] # Number of train data
final_data = all_data_enc.copy()

# Divide train data and test data (train rows came first in the concat).
X = np.asarray(final_data[:num_train])
X_test = np.asarray(final_data[num_train:])
y = np.asarray(train_df['target'])

# Splitting the train to use 20% later on evaluation
X, X_eval, y, y_eval = train_test_split(X, y, test_size = 0.2)
# -

# <a id="3"></a>
# # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">3- Model</p>

# ## Normalized Gini - Kaggle Evaluation

# +
def gini(actual, pred):
    """Gini coefficient of `pred` against binary labels `actual`.

    Sorts actuals by descending prediction (ties broken by original
    position), accumulates their cumulative share, and subtracts the
    diagonal. Returns a value in [-0.5, 0.5] scaled by len(actual).
    """
    assert (len(actual) == len(pred))
    # FIX: np.float was removed in NumPy 1.24; the builtin float is what
    # the alias always meant. Also renamed the local that shadowed the
    # builtin `all`.
    arr = np.asarray(np.c_[actual, pred, np.arange(len(actual))], dtype=float)
    # Sort by prediction descending, original index ascending.
    arr = arr[np.lexsort((arr[:, 2], -1 * arr[:, 1]))]
    totalLosses = arr[:, 0].sum()
    giniSum = arr[:, 0].cumsum().sum() / totalLosses
    giniSum -= (len(actual) + 1) / 2.
    return giniSum / len(actual)

def gini_normalized(actual, pred):
    """Gini of `pred` normalized by the maximum achievable Gini."""
    return gini(actual, pred) / gini(actual, actual)
# -

# <a id="3.1"></a>
# # LightGBM

lgb_params = {
    'objective': 'binary',
    'is_unbalance': True, # As we have a severe imbalanced class
    'metric': 'auc',
    'n_estimators': 500,
    'n_jobs': 2,
    'learning_rate': 0.01,
    'num_leaves': 30
}

lgb_c = lgb.LGBMClassifier()
lgb_c.set_params(**lgb_params)

# __NOTE__
# - __'is_unbalance': True__
#
# Sets the weights of the dominated label to 1, and the weights of the dominant labels to the ratio of count of dominant/dominated.

# ## K-fold cross validation

# K-fold cross-validation reports on the performance of a model on several (k) samples from your training set. This provides a less biased evaluation of the model. However, K-fold cross-validation is more computationally expensive than slicing your data into three parts. It re-fits the model and tests it k-times, for each iteration, as opposed to one time.
#
# It can be beneficial for many reasons, for example, when your data set is not large enough to slice into three representative parts, cross-validation can help with that. Preventing overfitting to your test data without further reducing the size of your training data set.
# +
# %%time
# Out-of-fold predictions for LightGBM: each validation slice is filled
# exactly once across the 5 folds.
preds_proba_lgb = np.zeros([X.shape[0],2])
preds_lgb = np.zeros(X.shape[0])

folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021)

for n_fold, (train_index, test_index) in enumerate(folds.split(X, y)):
    print('#'*40, f'Fold {n_fold+1} out of {folds.n_splits}', '#'*40)
    X_train, y_train = X[train_index], y[train_index] # Train data
    X_val, y_val = X[test_index], y[test_index] # Valid data

    lgb_c.fit(X_train, y_train,
              eval_set=[(X_train, y_train), (X_val, y_val)],
              verbose=250, early_stopping_rounds=50)

    preds_proba_lgb[test_index] += lgb_c.predict_proba(X_val, raw_score=False)
    preds_lgb[test_index] += lgb_c.predict(X_val, raw_score=False)

    # Normalized Gini coefficient for prediction probabilities
    gini_score = gini_normalized(y_val, preds_proba_lgb[test_index][:, 1])
    print(f'Fold {n_fold+1} gini score: {gini_score}\n')
# -

# LightGBM total: __6min 43s__

# ## Saving the results

# +
# Save LightGBM model to file in the current working directory
joblib_file = "./files/lightgbm/LightGBM_Model.pkl"
joblib.dump(lgb_c, joblib_file)
# -

with open('./files/lightgbm/evals_result.json', 'w') as fp:
    json.dump(lgb_c.evals_result_, fp, sort_keys=True, indent=4)

np.savetxt('./files/lightgbm/preds_lgb.csv', preds_lgb, fmt = '%i')
np.savetxt('./files/lightgbm/preds_prob_lgb.csv', preds_proba_lgb, fmt = '%s', delimiter=",")

# <a id="3.2"></a>
# # XGBoost (eXtreme Gradient Boosting)

params_xgb = {
    'objective': 'binary:logistic',
    'learning_rate': 0.01,
    'n_estimators': 500,
    #'missing': -1,
    'n_jobs': 2,
    'random_state': 1986
}
xgb_model = xgb.XGBClassifier(**params_xgb)

# +
# %%time
# Out-of-fold predictions for XGBoost, same CV scheme as LightGBM above.
preds_proba_xgb = np.zeros([X.shape[0],2])
preds_xgb = np.zeros(X.shape[0])

folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021)

for n_fold, (train_index, test_index) in enumerate(folds.split(X, y)):
    print('#'*40, f'Fold {n_fold+1} out of {folds.n_splits}', '#'*40)
    X_train, y_train = X[train_index], y[train_index] # Train data
    X_val, y_val = X[test_index], y[test_index] # Valid data

    xgb_model.fit(X_train, y_train,
                  eval_set=[(X_train, y_train), (X_val, y_val)],
                  eval_metric='auc', early_stopping_rounds=50, verbose = False)

    preds_proba_xgb[test_index] += xgb_model.predict_proba(X_val)
    preds_xgb[test_index] += xgb_model.predict(X_val)

    # Normalized Gini coefficient for prediction probabilities
    gini_score = gini_normalized(y_val, preds_proba_xgb[test_index][:, 1])
    print(f'Fold {n_fold+1} gini score: {gini_score}\n')
# -

# XGBoost total: __3h 34min 42s__

# +
# Save XGBoost model to file in the current working directory
joblib_file = "./files/xgb/XGBoost_Model.pkl"
joblib.dump(xgb_model, joblib_file)
# -

with open('./files/xgb/evals_result.json', 'w') as fp:
    json.dump(xgb_model.evals_result_, fp, sort_keys=True, indent=4)

np.savetxt('./files/xgb/preds_xgb.csv', preds_xgb, fmt = '%i')
np.savetxt('./files/xgb/preds_prob_xgb.csv', preds_proba_xgb, fmt = '%s', delimiter=",")

# <a id="3.3"></a>
# # Random Search for Hyper-Parameter Optimization

# Random Search for Hyper-Parameter Optimization
#
# >https://lightgbm.readthedocs.io/en/latest/Parameters-Tuning.html

# +
# Randomized Search CV
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)

param_dist = {'learning_rate': [0.1, 0.01, 0.001],
              'max_bin': [500, 1000, 2000],
              'min_data_in_leaf': [500, 1000, 1500],
              'n_estimators': [500, 1500, 2000],
              'num_leaves': [10, 30, 50, 100],
              'bagging_freq': [1, 3, 5],
              'bagging_fraction': [0.25, 0.5, 0.75]
              }

n_iter_search = 10
randomsearh_lgb = lgb.LGBMClassifier()
random_search = RandomizedSearchCV(randomsearh_lgb, param_distributions = param_dist,
                                   n_iter = n_iter_search, return_train_score=True)

# FIX: the file does `import time`, so the bare call `time()` raised
# TypeError ('module' object is not callable); time.time() is the intent.
start = time.time()
random_search.fit(final_data[:num_train], train_df['target'])
print("RandomizedSearchCV: %.2f seconds for %d candidates to best parameters."
      % ((time.time() - start), n_iter_search))
# -

# ---
#
# __NOTE__: Due to computing resources limitations I did not perform RandomizedSearchCV locally
#
# __Parameters Tuning__ was performed on Kaggle and yield the output below:
#
# ---

# ![RandomizedSearchCV](./img/randomSearchCV.png)

# `random_search.best_params_`
#
# ![Best-Params](./img/best-params.png)

# <a id="3.4"></a>
# # LightGBM Tuned

# +
lgb_params_tuned = {
    'objective': 'binary',
    'is_unbalance': True, # As we have a severe imbalanced class
    'metric': 'auc',
    'n_estimators': 2500,
    'n_jobs': 2,
    'num_leaves': 50,
    'max_bin': 2000,
    'learning_rate': 0.001,
    'min_data_in_leaf': 200,
    'feature_fraction': 0.7,
    'bagging_freq': 1,
    'bagging_fraction': 0.5
}

lgb_tuned = lgb.LGBMClassifier()
lgb_tuned.set_params(**lgb_params_tuned)

# +
# %%time
# Out-of-fold predictions for the tuned LightGBM model.
preds_proba_lgb_tuned = np.zeros([X.shape[0],2])
preds_lgb_tuned = np.zeros(X.shape[0])

folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021)

for n_fold, (train_index, test_index) in enumerate(folds.split(X, y)):
    print('#'*40, f'Fold {n_fold+1} out of {folds.n_splits}', '#'*40)
    X_train, y_train = X[train_index], y[train_index] # Train data
    X_val, y_val = X[test_index], y[test_index] # Valid data

    lgb_tuned.fit(X_train, y_train,
                  eval_set=[(X_train, y_train), (X_val, y_val)],
                  verbose=1250, early_stopping_rounds=200)

    preds_proba_lgb_tuned[test_index] += lgb_tuned.predict_proba(X_val)
    preds_lgb_tuned[test_index] += lgb_tuned.predict(X_val)

    # Normalized Gini coefficient for prediction probabilities
    gini_score = gini_normalized(y_val, preds_proba_lgb_tuned[test_index][:, 1])
    print(f'Fold {n_fold+1} gini score: {gini_score}\n')
# -

# LightGBM Tuned total: __1h 6min 3s__

# ## Saving the results

# +
# Save LightGBM model to file in the current working directory
joblib_file = "./files/lightgbm/LightGBM_tuned.pkl"
joblib.dump(lgb_tuned, joblib_file)
# -

with open('./files/lightgbm/evals_result_tuned.json', 'w') as fp:
    json.dump(lgb_tuned.evals_result_, fp, sort_keys=True, indent=4)
# Persist the tuned model's out-of-fold class predictions (integers) and
# class probabilities (one row per sample, two columns) for later reuse.
np.savetxt('./files/lightgbm/preds_lgb_tuned.csv', preds_lgb_tuned, fmt = '%i')
np.savetxt('./files/lightgbm/preds_prob_lgb_tuned.csv', preds_proba_lgb_tuned, fmt = '%s', delimiter=",")

# <a id="4"></a>
# # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">4- Evaluation</p>

# <a id="4.1"></a>
# # Normalized Gini Coefficient

# The Gini index or Gini coefficient is a statistical measure of distribution that was developed by the Italian statistician Corrado Gini in 1912. It is used as a gauge of economic inequality, measuring income distribution among a population.
#
# The Gini coefficient is equal to the area below the line of perfect equality (0.5 by definition) minus the area below the Lorenz curve, divided by the area below the line of perfect equality. In other words, it is double the area between the Lorenz curve and the line of perfect equality.
#
# Gini is a vital metric for insurers because the main concern focuses on segregating high and low risks rather than predicting losses. The reason is that this information is used to price insurance risks and charging customers on their predicted loss is not accountable for expenses and profit.
# # Since this task is classification, AUC was used as a metric because it's equivalent to using Gini since:
#
# $$ Gini = 2 * AUC - 1 $$

# In order to compute gini coefficient we should apply two integrals with the cumulative proportion of positive class
#
# $$A=\int_{0}^{1}F(x)dx\approx \sum_{i=1}^{n}F_{i}(x)\times \frac{1}{n}$$
#
# $$B=\int_{0}^{1}x \; dx\approx \sum_{i=1}^{n}\frac{i}{n} \times \frac{1}{n}= \frac{1}{n^{2}}\times \frac{n\times (n+1)}{2}\approx 0.5$$
#
# $$Gini\, coeff= A-B=\frac{1}{n}\left (\sum_{i=1}^{n}F_{i}(x) -\frac{n+1}{2} \right)$$

# __Reference__:
# >https://theblog.github.io/post/gini-coefficient-intuitive-explanation/

# Because both classes are important due to the segregating aspect mentioned on the Gini coefficient, the evaluation metric chosen will be AUC.

# <a id="4.2"></a>
# # LightGBM

# Reload the persisted model and its out-of-fold predictions.
# FIX: np.fromstring(loadtxt(...)) applied a bytes-parsing routine (removed
# in NumPy 2.0) to an already-parsed array; loadtxt alone is the intent.
lightgbm_model = joblib.load('./files/lightgbm/LightGBM_Model.pkl')
preds_lgb = loadtxt('./files/lightgbm/preds_lgb.csv')
preds_prob_lgb = np.loadtxt('./files/lightgbm/preds_prob_lgb.csv', delimiter=',')

# Probability of the positive class on the held-out evaluation split.
lightgbm_eval_probs = lightgbm_model.predict_proba(X_eval)
lightgbm_eval_probs = lightgbm_eval_probs[:, 1]

roc_auc_lightgbm = roc_auc_score(y_eval, lightgbm_eval_probs)
print('LightGBM ROC AUC %.3f' % roc_auc_lightgbm)

# +
# fpr = FalsePositiveRate
# tpr = TruePositiveRate

# calculate roc curves
fpr_lgbm, tpr_lgbm, thresholds_lgbm = roc_curve(y_eval, lightgbm_eval_probs)
# get the best threshold
J_lgbm = tpr_lgbm - fpr_lgbm # Youden's index
ix_lgbm = argmax(J_lgbm)
best_thresh_lgbm = thresholds_lgbm[ix_lgbm]
print('Best Threshold for LightGBM Model= %f' % (best_thresh_lgbm))
# -

# - J = Sensitivity + Specificity – 1
# - J = Sensitivity + (1 – FalsePositiveRate) – 1
# - J = TruePositiveRate – FalsePositiveRate
#
# >https://en.wikipedia.org/wiki/Youden%27s_J_statistic

lgbm_gini= gini(y_eval, lightgbm_eval_probs)
lgbm_gini_max = gini(y_eval, y_eval)
lgbm_ngini= gini_normalized(y_eval, lightgbm_eval_probs)
print('Gini: %.3f \nMax. Gini: %.3f \nNormalized Gini: %.3f' % (lgbm_gini, lgbm_gini_max, lgbm_ngini))

# +
# Sort the actual values by the predictions
data_lgb = np.asarray(np.c_[y_eval, lightgbm_eval_probs,np.arange(len(y_eval))])
sorted_actual_lgb= data_lgb[np.lexsort((data_lgb[:,2],-1 * data_lgb[:, 1]))][:,0]

# Sum up the actual values
cumulative_actual_lgb = np.cumsum(sorted_actual_lgb)
cumulative_index_lgb = np.arange(1, len(cumulative_actual_lgb)+1)

cumulative_actual_shares_lgb = cumulative_actual_lgb / sum(y_eval)
cumulative_index_shares_lgb = cumulative_index_lgb / len(lightgbm_eval_probs)

# Add (0, 0) to the plot
x_values_lgb = [0] + list(cumulative_index_shares_lgb)
y_values_lgb = [0] + list(cumulative_actual_shares_lgb)

# Display the 45° line stacked on top of the y values
diagonal_lgb = [x - y for (x, y) in zip(x_values_lgb, y_values_lgb)]

plt.stackplot(x_values_lgb, y_values_lgb, diagonal_lgb, colors=['tab:blue', 'tab:orange'])
plt.legend(['_nolegend_','Gini Coefficient'], loc=2)
plt.xlabel('Cumulative Share of Predictions')
plt.ylabel('Cumulative Share of Actual Values')
plt.title('LightGBM | Gini = %.3f'%lgbm_gini)
plt.show()
# -

# <a id="4.3"></a>
# # XGBoost

xgb_model = joblib.load('./files/xgb/XGBoost_Model.pkl')
preds_xgb = loadtxt('./files/xgb/preds_xgb.csv')
preds_prob_xgb = np.loadtxt('./files/xgb/preds_prob_xgb.csv', delimiter=',')

xgb_eval_probs = xgb_model.predict_proba(X_eval)
xgb_eval_probs = xgb_eval_probs[:, 1]

# calculate roc curves
fpr_xgb, tpr_xgb, thresholds_xgb = roc_curve(y_eval, xgb_eval_probs)
# get the best threshold
J_xgb = tpr_xgb - fpr_xgb # Youden's index
ix_xgb = argmax(J_xgb)
best_thresh_xgb = thresholds_xgb[ix_xgb]
print('Best Threshold for XGBoost Model= %f' % (best_thresh_xgb))

roc_auc_xgb = roc_auc_score(y_eval, xgb_eval_probs)
print('XGBoost ROC AUC %.3f' % roc_auc_xgb)

xgb_gini= gini(y_eval, xgb_eval_probs)
xgb_gini_max = gini(y_eval, y_eval)
xgb_ngini= gini_normalized(y_eval, xgb_eval_probs)
print('Gini: %.3f \nMax. Gini: %.3f \nNormalized Gini: %.3f' % (xgb_gini, xgb_gini_max, xgb_ngini))

# +
# Sort the actual values by the predictions
data_xgb = np.asarray(np.c_[y_eval, xgb_eval_probs,np.arange(len(y_eval))])
# FIX: this previously indexed data_lgb with the XGBoost sort order,
# so the XGBoost Lorenz curve was drawn from LightGBM's data.
sorted_actual_xgb= data_xgb[np.lexsort((data_xgb[:,2],-1 * data_xgb[:, 1]))][:,0]

# Sum up the actual values
cumulative_actual_xgb = np.cumsum(sorted_actual_xgb)
cumulative_index_xgb = np.arange(1, len(cumulative_actual_xgb)+1)

cumulative_actual_shares_xgb = cumulative_actual_xgb / sum(y_eval)
cumulative_index_shares_xgb = cumulative_index_xgb / len(xgb_eval_probs)

# Add (0, 0) to the plot
x_values_xgb = [0] + list(cumulative_index_shares_xgb)
y_values_xgb = [0] + list(cumulative_actual_shares_xgb)

# Display the 45° line stacked on top of the y values
diagonal_xgb = [x - y for (x, y) in zip(x_values_xgb, y_values_xgb)]

plt.stackplot(x_values_xgb, y_values_xgb, diagonal_xgb, colors=['tab:blue', 'tab:orange'])
plt.legend(['_nolegend_','Gini Coefficient'], loc=2)
plt.xlabel('Cumulative Share of Predictions')
plt.ylabel('Cumulative Share of Actual Values')
plt.title('XGBoost | Gini = %.3f'%xgb_gini)
plt.show()
# -

# <a id="4.4"></a>
# # LightGBM - Tuned Model

lightgbm_tuned = joblib.load('./files/lightgbm/LightGBM_tuned.pkl')
preds_lgb_tuned = loadtxt('./files/lightgbm/preds_lgb_tuned.csv')
preds_prob_lgb_tuned = np.loadtxt('./files/lightgbm/preds_prob_lgb_tuned.csv', delimiter=',')

lightgbm_tuned_eval_probs = lightgbm_tuned.predict_proba(X_eval)
lightgbm_tuned_eval_probs = lightgbm_tuned_eval_probs[:, 1]

roc_auc_lightgbm_tuned = roc_auc_score(y_eval, lightgbm_tuned_eval_probs)
print('LightGBM Tuned ROC AUC %.3f' % roc_auc_lightgbm_tuned)

# calculate roc curves
fpr_lgbm_tuned, tpr_lgbm_tuned, thresholds_lgbm_tuned = roc_curve(y_eval, lightgbm_tuned_eval_probs)
# get the best threshold
J_lgbm_tuned = tpr_lgbm_tuned - fpr_lgbm_tuned # Youden's index
ix_lgbm_tuned = argmax(J_lgbm_tuned)
best_thresh_lgbm_tuned = thresholds_lgbm_tuned[ix_lgbm_tuned]
print('Best Threshold for LightGBM Tuned Model= %f' % (best_thresh_lgbm_tuned))

lgbm_tuned_gini= gini(y_eval, lightgbm_tuned_eval_probs)
lgbm_tuned_gini_max = gini(y_eval, y_eval)
lgbm_tuned_ngini= gini_normalized(y_eval, lightgbm_tuned_eval_probs)
print('Gini: %.3f \nMax. Gini: %.3f \nNormalized Gini: %.3f' % (lgbm_tuned_gini, lgbm_tuned_gini_max, lgbm_tuned_ngini))

# +
# Sort the actual values by the predictions
data_lgbm_tuned = np.asarray(np.c_[y_eval, lightgbm_tuned_eval_probs,np.arange(len(y_eval))])
sorted_actual_lgbm_tuned = data_lgbm_tuned[np.lexsort((data_lgbm_tuned[:,2],-1 * data_lgbm_tuned[:, 1]))][:,0]

# Sum up the actual values
cumulative_actual_lgbm_tuned = np.cumsum(sorted_actual_lgbm_tuned)
cumulative_index_lgbm_tuned = np.arange(1, len(cumulative_actual_lgbm_tuned)+1)

cumulative_actual_shares_lgbm_tuned = cumulative_actual_lgbm_tuned / sum(y_eval)
cumulative_index_shares_lgbm_tuned = cumulative_index_lgbm_tuned / len(lightgbm_tuned_eval_probs)

# Add (0, 0) to the plot
x_values_lgbm_tuned = [0] + list(cumulative_index_shares_lgbm_tuned)
y_values_lgbm_tuned = [0] + list(cumulative_actual_shares_lgbm_tuned)

# Display the 45° line stacked on top of the y values
diagonal_lgbm_tuned = [x - y for (x, y) in zip(x_values_lgbm_tuned, y_values_lgbm_tuned)]

plt.stackplot(x_values_lgbm_tuned, y_values_lgbm_tuned, diagonal_lgbm_tuned, colors=['tab:blue', 'tab:orange'])
plt.legend(['_nolegend_','Gini Coefficient'], loc=2)
plt.xlabel('Cumulative Share of Predictions')
plt.ylabel('Cumulative Share of Actual Values')
plt.title('LightGBM Tuned | Gini = %.3f'%lgbm_tuned_gini)
plt.show()
# -

# <a id="4.5"></a>
# # ROC Area Under Curve (AUC) Score

# plot the roc curve for the model
plt.plot([0,1], [0,1], linestyle='--')
plt.plot(fpr_lgbm, tpr_lgbm, label='LightGBM (AUC = %.2f)' % roc_auc_lightgbm)
plt.plot(fpr_lgbm_tuned, tpr_lgbm_tuned, label='LightGBM Tuned (AUC = %.2f)' % roc_auc_lightgbm_tuned)
plt.plot(fpr_xgb, tpr_xgb, label='XGBoost (AUC = %.2f)' % roc_auc_xgb)
# axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Area Under Curve')
plt.legend()
# show the plot
plt.show()

# All 3 models' performance (AUC) were very similar and using Random Search for Hyper-Parameter Optimization only increased 0.01 in AUC metric. One interesting point was about two models with the same score, LightGBM (first version) and XGBoost, but with almost 3,5 hours difference in training time.

# ## Comparative table

# | Model            | Normalized Gini| AUC      | Training time  |
# | :--------------  |:------------:  | :------: | -------------: |
# | LightGBM         | 0.33           | 0.67     | 6min 43s       |
# | XGBoost          | 0.33           | 0.67     | 3h 34min 42s   |
# | **LightGBM Tuned** | **0.35**     | **0.68** | **1h 6min 3s** |

# <a id="5"></a>
# # <p style="background-color:#018cb7; font-size:100%; text-align:left; color:#FFFFFF; padding: 15px 5px 15px 25px; border-radius: 15px;">5- Kaggle Submission</p>

# X_test is the test.csv provided with all transformations applied
test_preds_lgb = lightgbm_tuned.predict_proba(X_test)
test_preds_xgb = xgb_model.predict_proba(X_test)

# slicing to get the probability for the positive class only
test_preds_lgb = test_preds_lgb[:,1:]
test_preds_xgb = test_preds_xgb[:,1:]

submission = pd.read_csv(path + 'sample_submission.csv', index_col='id')
submission['target'] = test_preds_lgb
submission.to_csv('./kaggle-submission/LightGBM.csv')

submission = pd.read_csv(path + 'sample_submission.csv', index_col='id')
submission['target'] = test_preds_xgb
submission.to_csv('./kaggle-submission/XGBoost.csv')

# Weighted ensemble of the two submissions (60% LightGBM / 40% XGBoost).
submission = pd.read_csv(path + 'sample_submission.csv', index_col='id')
ensemble_test_preds = test_preds_lgb * 0.6 + test_preds_xgb * 0.4
submission['target'] = ensemble_test_preds
submission.to_csv('./kaggle-submission/Ensemble.csv')

# ---
# <img title="GitHub Mark" src="https://github.com/pessini/insurance-claim-prediction/blob/main/img/GitHub-Mark-64px.png?raw=true" style="height: 32px; padding-right: 15px" alt="GitHub Mark" align="left"> [GitHub repository](https://github.com/pessini/insurance-claim-prediction) <br>Author: <NAME>
safe-driver-prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] tags=["remove_cell"]
# # Deutsch-Jozsa Algorithm
# -

# In this section, we first introduce the Deutsch-Jozsa problem, and classical and quantum algorithms to solve it. We then implement the quantum algorithm using Qiskit, and run it on a simulator and device.

# + [markdown] tags=["contents"]
# ## Contents
#
# 1. [Introduction](#introduction)
#    1.1 [Deutsch-Jozsa Problem](#djproblem)
#    1.2 [The Classical Solution](#classical-solution)
#    1.3 [The Quantum Solution](#quantum-solution)
#    1.4 [Why Does This Work?](#why-does-this-work)
# 2. [Worked Example](#example)
# 3. [Creating Quantum Oracles](#creating-quantum-oracles)
# 4. [Qiskit Implementation](#implementation)
#    4.1 [Constant Oracle](#const_oracle)
#    4.2 [Balanced Oracle](#balanced_oracle)
#    4.3 [The Full Algorithm](#full_alg)
#    4.4 [Generalised Circuit](#general_circs)
# 5. [Running on Real Devices](#device)
# 6. [Problems](#problems)
# 7. [References](#references)
# -

# ## 1. Introduction <a id='introduction'></a>

# The Deutsch-Jozsa algorithm, first introduced in Reference [1], was the first example of a quantum algorithm that performs better than the best classical algorithm. It showed that there can be advantages to using a quantum computer as a computational tool for a specific problem.

# ### 1.1 Deutsch-Jozsa Problem <a id='djproblem'> </a>
#
# We are given a hidden Boolean function $f$, which takes as input a string of bits, and returns either $0$ or $1$, that is:
#
# $$
# f(\{x_0,x_1,x_2,...\}) \rightarrow 0 \textrm{ or } 1 \textrm{ , where } x_n \textrm{ is } 0 \textrm{ or } 1$$
#
# The property of the given Boolean function is that it is guaranteed to either be balanced or constant.
A constant function returns all $0$'s or all $1$'s for any input, while a balanced function returns $0$'s for exactly half of all inputs and $1$'s for the other half. Our task is to determine whether the given function is balanced or constant. # # Note that the Deutsch-Jozsa problem is an $n$-bit extension of the single bit Deutsch problem. # # ### 1.2 The Classical Solution <a id='classical-solution'> </a> # # Classically, in the best case, two queries to the oracle can determine if the hidden Boolean function, $f(x)$, is balanced: # e.g. if we get both $f(0,0,0,...)\rightarrow 0$ and $f(1,0,0,...) \rightarrow 1$, then we know the function is balanced as we have obtained the two different outputs. # # In the worst case, if we continue to see the same output for each input we try, we will have to check exactly half of all possible inputs plus one in order to be certain that $f(x)$ is constant. Since the total number of possible inputs is $2^n$, this implies that we need $2^{n-1}+1$ trial inputs to be certain that $f(x)$ is constant in the worst case. For example, for a $4$-bit string, if we checked $8$ out of the $16$ possible combinations, getting all $0$'s, it is still possible that the $9^\textrm{th}$ input returns a $1$ and $f(x)$ is balanced. Probabilistically, this is a very unlikely event. In fact, if we get the same result continually in succession, we can express the probability that the function is constant as a function of $k$ inputs as: # # # # $$ P_\textrm{constant}(k) = 1 - \frac{1}{2^{k-1}} \qquad \textrm{for } 1 < k \leq 2^{n-1}$$ # # # # Realistically, we could opt to truncate our classical algorithm early, say if we were over x% confident. But if we want to be 100% confident, we would need to check $2^{n-1}+1$ inputs. 
Below is the generic circuit for the Deutsch-Jozsa algorithm.
Apply a Hadamard gate to each qubit in the first register: # $$ # \begin{aligned} # \lvert \psi_3 \rangle # & = \frac{1}{2^n}\sum_{x=0}^{2^n-1}(-1)^{f(x)} # \left[ \sum_{y=0}^{2^n-1}(-1)^{x \cdot y} # \vert y \rangle \right] \\ # & = \frac{1}{2^n}\sum_{y=0}^{2^n-1} # \left[ \sum_{x=0}^{2^n-1}(-1)^{f(x)}(-1)^{x \cdot y} \right] # \vert y \rangle # \end{aligned} # $$ # # where $x \cdot y = x_0y_0 \oplus x_1y_1 \oplus \ldots \oplus x_{n-1}y_{n-1}$ is the sum of the bitwise product. # </li> # # <li> # Measure the first register. Notice that the probability of measuring $\vert 0 \rangle ^{\otimes n} = \lvert \frac{1}{\sqrt{2^n}}\sum_{x=0}^{2^n-1}(-1)^{f(x)} \rvert^2$, which evaluates to $1$ if $f(x)$ is constant and $0$ if $f(x)$ is balanced. # </li> # # </ol> # # ### 1.4 Why Does This Work? <a id='why-does-this-work'> </a> # # - **Constant Oracle** # # When the oracle is *constant*, it has no effect (up to a global phase) on the input qubits, and the quantum states before and after querying the oracle are the same. Since the H-gate is its own inverse, in Step 4 we reverse Step 2 to obtain the initial quantum state of $|00\dots 0\rangle$ in the first register. # # $$ # H^{\otimes n}\begin{bmatrix} 1 \\ 0 \\ 0 \\ \vdots \\ 0 \end{bmatrix} # = # \tfrac{1}{\sqrt{2^n}}\begin{bmatrix} 1 \\ 1 \\ 1 \\ \vdots \\ 1 \end{bmatrix} # \quad \xrightarrow{\text{after } U_f} \quad # H^{\otimes n}\tfrac{1}{\sqrt{2^n}}\begin{bmatrix} 1 \\ 1 \\ 1 \\ \vdots \\ 1 \end{bmatrix} # = # \begin{bmatrix} 1 \\ 0 \\ 0 \\ \vdots \\ 0 \end{bmatrix} # $$ # # - **Balanced Oracle** # # After step 2, our input register is an equal superposition of all the states in the computational basis. 
When the oracle is *balanced*, phase kickback adds a negative phase to exactly half these states: # # $$ # U_f \tfrac{1}{\sqrt{2^n}}\begin{bmatrix} 1 \\ 1 \\ 1 \\ \vdots \\ 1 \end{bmatrix} # = # \tfrac{1}{\sqrt{2^n}}\begin{bmatrix} -1 \\ 1 \\ -1 \\ \vdots \\ 1 \end{bmatrix} # $$ # # # The quantum state after querying the oracle is orthogonal to the quantum state before querying the oracle. Thus, in Step 4, when applying the H-gates, we must end up with a quantum state that is orthogonal to $|00\dots 0\rangle$. This means we should never measure the all-zero state. # # ## 2. Worked Example <a id='example'></a> # # Let's go through a specific example for a two bit balanced function: # # <ol> # <li> The first register of two qubits is initialized to $|00\rangle$ and the second register qubit to $|1\rangle$ # # (Note that we are using subscripts 1, 2, and 3 to index the qubits. A subscript of "12" indicates the state of the register containing qubits 1 and 2) # # # $$\lvert \psi_0 \rangle = \lvert 0 0 \rangle_{12} \otimes \lvert 1 \rangle_{3} $$ # # # </li> # # <li> Apply Hadamard on all qubits # # # $$\lvert \psi_1 \rangle = \frac{1}{2} \left( \lvert 0 0 \rangle + \lvert 0 1 \rangle + \lvert 1 0 \rangle + \lvert 1 1 \rangle \right)_{12} \otimes \frac{1}{\sqrt{2}} \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} $$ # # # </li> # # <li> The oracle function can be implemented as $\text{Q}_f = CX_{13}CX_{23}$, # $$ # \begin{align*} # \lvert \psi_2 \rangle = \frac{1}{2\sqrt{2}} \left[ \lvert 0 0 \rangle_{12} \otimes \left( \lvert 0 \oplus 0 \oplus 0 \rangle - \lvert 1 \oplus 0 \oplus 0 \rangle \right)_{3} \\ # + \lvert 0 1 \rangle_{12} \otimes \left( \lvert 0 \oplus 0 \oplus 1 \rangle - \lvert 1 \oplus 0 \oplus 1 \rangle \right)_{3} \\ # + \lvert 1 0 \rangle_{12} \otimes \left( \lvert 0 \oplus 1 \oplus 0 \rangle - \lvert 1 \oplus 1 \oplus 0 \rangle \right)_{3} \\ # + \lvert 1 1 \rangle_{12} \otimes \left( \lvert 0 \oplus 1 \oplus 1 \rangle - \lvert 1 \oplus 1 \oplus 
1 \rangle \right)_{3} \right] # \end{align*} # $$ # </li> # # <li>Simplifying this, we get the following: # $$ # \begin{aligned} # \lvert \psi_2 \rangle & = \frac{1}{2\sqrt{2}} \left[ \lvert 0 0 \rangle_{12} \otimes \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} - \lvert 0 1 \rangle_{12} \otimes \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} - \lvert 1 0 \rangle_{12} \otimes \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} + \lvert 1 1 \rangle_{12} \otimes \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} \right] \\ # & = \frac{1}{2} \left( \lvert 0 0 \rangle - \lvert 0 1 \rangle - \lvert 1 0 \rangle + \lvert 1 1 \rangle \right)_{12} \otimes \frac{1}{\sqrt{2}} \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} \\ # & = \frac{1}{\sqrt{2}} \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{1} \otimes \frac{1}{\sqrt{2}} \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{2} \otimes \frac{1}{\sqrt{2}} \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} # \end{aligned} # $$ # </li> # # <li> Apply Hadamard on the first register # # # $$ \lvert \psi_3\rangle = \lvert 1 \rangle_{1} \otimes \lvert 1 \rangle_{2} \otimes \left( \lvert 0 \rangle - \lvert 1 \rangle \right)_{3} $$ # # # </li> # # <li> Measuring the first two qubits will give the non-zero $11$, indicating a balanced function. # </li> # </ol> # # You can try out similar examples using the widget below. Press the buttons to add H-gates and oracles, re-run the cell and/or set `case="constant"` to try out different oracles. from qiskit_textbook.widgets import dj_widget dj_widget(size="small", case="balanced") # ## 3. Creating Quantum Oracles <a id='creating-quantum-oracles'> </a> # # Let's see some different ways we can create a quantum oracle. # # For a constant function, it is simple: # # $\qquad$ 1. if f(x) = 0, then apply the $I$ gate to the qubit in register 2. # $\qquad$ 2. if f(x) = 1, then apply the $X$ gate to the qubit in register 2. 
# Let's start by creating a constant oracle, in this case the input has no effect on the output so we just randomly set the output qubit to be 0 or 1:
n = 3 const_oracle = QuantumCircuit(n+1) output = np.random.randint(2) if output == 1: const_oracle.x(n) const_oracle.draw() # - # ### 4.2 Balanced Oracle <a id='balanced_oracle'></a> balanced_oracle = QuantumCircuit(n+1) # Next, we create a balanced oracle. As we saw in section 1b, we can create a balanced oracle by performing CNOTs with each input qubit as a control and the output bit as the target. We can vary the input states that give 0 or 1 by wrapping some of the controls in X-gates. Let's first choose a binary string of length `n` that dictates which controls to wrap: b_str = "101" # Now we have this string, we can use it as a key to place our X-gates. For each qubit in our circuit, we place an X-gate if the corresponding digit in `b_str` is `1`, or do nothing if the digit is `0`. # + balanced_oracle = QuantumCircuit(n+1) b_str = "101" # Place X-gates for qubit in range(len(b_str)): if b_str[qubit] == '1': balanced_oracle.x(qubit) balanced_oracle.draw() # - # Next, we do our controlled-NOT gates, using each input qubit as a control, and the output qubit as a target: # + balanced_oracle = QuantumCircuit(n+1) b_str = "101" # Place X-gates for qubit in range(len(b_str)): if b_str[qubit] == '1': balanced_oracle.x(qubit) # Use barrier as divider balanced_oracle.barrier() # Controlled-NOT gates for qubit in range(n): balanced_oracle.cx(qubit, n) balanced_oracle.barrier() balanced_oracle.draw() # - # Finally, we repeat the code from two cells up to finish wrapping the controls in X-gates: # + tags=["thebelab-init"] balanced_oracle = QuantumCircuit(n+1) b_str = "101" # Place X-gates for qubit in range(len(b_str)): if b_str[qubit] == '1': balanced_oracle.x(qubit) # Use barrier as divider balanced_oracle.barrier() # Controlled-NOT gates for qubit in range(n): balanced_oracle.cx(qubit, n) balanced_oracle.barrier() # Place X-gates for qubit in range(len(b_str)): if b_str[qubit] == '1': balanced_oracle.x(qubit) # Show oracle balanced_oracle.draw() # - # We have just 
created a balanced oracle! All that's left to do is see if the Deutsch-Jozsa algorithm can solve it.
def dj_oracle(case, n):
    """Build a Deutsch-Jozsa oracle over n input qubits plus one output qubit.

    Parameters
    ----------
    case : str
        Either "balanced" or "constant"; selects which kind of oracle to build.
    n : int
        Size of the input register (the circuit uses n + 1 qubits in total).

    Returns
    -------
    Gate
        The oracle as a single named gate, ready to be appended to a circuit.
    """
    oracle_qc = QuantumCircuit(n + 1)

    if case == "balanced":
        # Draw a random non-zero bitmask; its bits decide which CNOT controls
        # get wrapped in X-gates (this varies WHICH inputs map to 0/1 while
        # keeping the function balanced).
        b_str = format(np.random.randint(1, 2 ** n), '0' + str(n) + 'b')
        wrapped = [q for q, bit in enumerate(b_str) if bit == '1']
        for q in wrapped:
            oracle_qc.x(q)
        # A CNOT from every input qubit onto the output qubit guarantees a
        # balanced function.
        for q in range(n):
            oracle_qc.cx(q, n)
        for q in wrapped:
            oracle_qc.x(q)

    if case == "constant":
        # Constant oracle: the output is fixed to 0 or 1 at random; the input
        # register is left untouched.
        if np.random.randint(2) == 1:
            oracle_qc.x(n)

    oracle_gate = oracle_qc.to_gate()
    oracle_gate.name = "Oracle"  # label shown when the circuit is drawn
    return oracle_gate


def dj_algorithm(oracle, n):
    """Assemble the full Deutsch-Jozsa circuit around a given oracle gate.

    Parameters
    ----------
    oracle : Gate
        An (n + 1)-qubit oracle, e.g. from dj_oracle().
    n : int
        Size of the input register; the circuit measures these n qubits.

    Returns
    -------
    QuantumCircuit
        Circuit with n + 1 qubits and n classical bits, ready to run.
    """
    dj_circuit = QuantumCircuit(n + 1, n)
    # Output qubit into |->  (X then H).
    dj_circuit.x(n)
    dj_circuit.h(n)
    # Input register into an equal superposition.
    for q in range(n):
        dj_circuit.h(q)
    # Query the oracle once.
    dj_circuit.append(oracle, range(n + 1))
    # Undo the Hadamards on the input register and read it out.
    for q in range(n):
        dj_circuit.h(q)
    for i in range(n):
        dj_circuit.measure(i, i)
    return dj_circuit
Use the Deutsch-Jozsa algorithm to decide whether each oracle is balanced or constant
content/ch-algorithms/deutsch-jozsa.ipynb
# Pre-compiled pattern: a '#' followed by one or more word characters.
_HASHTAG_PATTERN = re.compile(r"#(\w+)")


def get_hashtags(text):
    """Return every hashtag in *text* (without the leading '#'), in order.

    The input is coerced with str() first — presumably to cope with NaN /
    float captions coming out of pandas (TODO confirm against callers).
    """
    return _HASHTAG_PATTERN.findall(str(text))
def cleaning_data(text):
    """Normalise a raw Instagram caption/comment for sentiment analysis.

    Steps: coerce to str, strip a leading URL, remove leftover HTML tags and
    entity fragments, turn word-splitting punctuation into spaces, collapse
    whitespace, separate glued-together emoji, then lower-case and trim.
    """
    text = str(text)
    # Remove a URL at the start of the text (anchored, as in the original).
    text = re.sub(r'^https?:\/\/.*[\r\n]*', ' ', text)
    # BUG FIX: these patterns were previously passed to str.replace(), which
    # matches them *literally* (e.g. the text would have to contain the exact
    # characters "(<br/>)"), so none of them ever removed anything. They are
    # clearly meant as regular expressions — apply them with re.sub instead.
    text = re.sub(r'<br/>', '', text)
    text = re.sub(r'(<a).*(>).*(</a>)', '', text)
    text = re.sub(r'&amp', '', text)
    text = re.sub(r'&gt', '', text)
    text = re.sub(r'&lt', '', text)
    # BUG FIX: '(\xa0)' was also replaced literally; the intent is to turn a
    # non-breaking space into a regular space.
    text = text.replace('\xa0', ' ')
    # Punctuation that commonly glues words together becomes a space.
    text = text.replace('-', ' ')
    text = text.replace('(', ' ')
    text = text.replace(')', ' ')
    text = re.sub('_', '', text)
    # Collapse runs of whitespace into a single space (raw string avoids the
    # invalid-escape warning the bare '\s+' produced).
    text = re.sub(r'\s+', ' ', text)
    # Round-trip through demojize/emojize with a space inserted between
    # back-to-back emoji codes, so consecutive emoji stay separable tokens.
    text = emoji.emojize(emoji.demojize(text).replace('::', ': :'))
    return text.lower().strip()
:')) comment_df['media_text'] = comment_df.media_text.astype(str).apply(cleaning_data) comment_df['comment_text'] = comment_df.comment_text.astype(str).apply(cleaning_data) comment_df.head() filtired_df = comment_df[comment_df['comment_text'].apply(lambda x: True if len(x.split(' ')) > 0 else False)] filtired_df from datetime import datetime filtired_df['post_datetime'] = filtired_df.media_created_at.apply(lambda x: datetime.utcfromtimestamp(x).strftime('%d.%m.%Y %H:%M:%S')) filtired_df['comment_datetime'] = filtired_df.comment_created_at.apply(lambda x: datetime.utcfromtimestamp(x).strftime('%d.%m.%Y %H:%M:%S')) filtired_df filtired_df.to_excel('excel/insta_comments.xlsx') # ## Sentimental model classifier # + from dostoevsky.tokenization import RegexTokenizer from dostoevsky.models import FastTextSocialNetworkModel tokenizer = RegexTokenizer() model = FastTextSocialNetworkModel(tokenizer=tokenizer) # + results = pd.DataFrame(model.predict(filtired_df.comment_text.astype(str))) results['media_id'] = filtired_df['media_id'].values results['media_type'] = filtired_df['media_type'].astype('int64').values results['comment_count'] = filtired_df['comment_count'].values results['media_like_count'] = filtired_df['media_like_count'].values results['media_created_at'] = filtired_df['media_created_at'].values results['post_text'] = filtired_df['media_text'].astype(str).apply(cleaning_data).values results['comment_text'] = filtired_df['comment_text'].astype(str).apply(cleaning_data).values results['comment_type'] = filtired_df['comment_type'].values results['comment_created_at'] = filtired_df['comment_created_at'].values results['comment_like_count'] = filtired_df['comment_like_count'].values results = results[['media_id', 'media_type', 'comment_count', 'media_like_count', 'media_created_at', 'post_text', 'comment_text', 'comment_type', 'comment_created_at', 'comment_like_count', 'negative', 'positive', 'skip', 'neutral', 'speech', ]] results['negative_prob'] = 
def stemming(sentences):
    """Snowball-stem each whitespace-separated token and re-join with spaces."""
    return ' '.join(stemmer.stem(token) for token in sentences.split())


def lemmatizing(text):
    """Lemmatise *text* with the Stanza pipeline; return space-joined lemmas."""
    lemmas = []
    for sentence in nlp(text).sentences:
        for word in sentence.words:
            lemmas.append(word.lemma)
    return ' '.join(lemmas)
TfidfVectorizer(stop_words=[lemmatizing(word) for word in stopwords.words('russian')]+[str(i) for i in range(200)]) X_tfidf = tfidf.fit_transform(lemmated).toarray() vocab = tfidf.vocabulary_ reverse_vocab = {v:k for k,v in vocab.items()} feature_names = tfidf.get_feature_names() idx = (-X_tfidf).argsort(axis=1) tfidf_max10 = idx[:, :20] tfidf_weight = -np.sort(-X_tfidf, axis=1)[:, :20] df_tfidf = pd.DataFrame([[reverse_vocab.get(item) for item in row] for row in tfidf_max10]) cl_names = ['top_' + str(i+1) for i in range(20)] df_tfidf.columns = cl_names df_tfidf['media_id'] = results.drop_duplicates('media_id')['media_id'].values df_tfidf = df_tfidf.set_index('media_id') df_tfidf = df_tfidf[cl_names] df_tfidf[['weight_' + str(i+1) for i in range(20)]] = -np.sort(-X_tfidf, axis=1)[:, :20] df_tfidf = df_tfidf[np.array([[df_tfidf.columns[i], df_tfidf.columns[i+20]] for i in range(20)]).reshape(20*2)] df_tfidf # + active="" # pd.DataFrame(np.dstack([np.array([[i]*20 for i in df_tfidf.index.tolist()]).reshape((411, 20, 1)), # df_tfidf.values.reshape((411, 20, 2))]).reshape((8220, 3)), columns=['post_id', 'word', 'weight']).to_excel('excel/insta_3dtopweitghts.xlsx') # + active="" # df_tfidf.to_excel('excel/insta_mediapost_20weights.xlsx') # + active="" # pd.concat([results.groupby('media_id').mean().loc[:,['negative_prob', 'positive_prob']], # df_tfidf], axis=1).to_excel('excel/insta_mediapost_tfidf.xlsx') # - # ## Clustering posts_dataframe = results.drop_duplicates('media_id')[['media_id', 'post_text']] posts_dataframe = posts_dataframe.set_index('media_id') posts_dataframe train_df = pd.concat([posts_dataframe, results.groupby('media_id').mean().loc[:,['negative', 'positive', 'skip', 'speech',]]], axis=1) train_df # + from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer tfidf_clus = TfidfVectorizer(stop_words=[stemming(word) for word in stopwords.words('russian')], max_df=0.1, min_df=0.001, ) X_tfidf_clus = 
tfidf_clus.fit_transform(lemmated).toarray() print(X_tfidf_clus.shape) # + active="" # from sklearn.cluster import KMeans # from collections import Counter # # for i in range(2,21): # kmeans = KMeans(n_clusters=i, random_state=0).fit(np.hstack([X_tfidf_clus, train_df.loc[:, ['negative', 'positive', # 'skip', 'speech',]].values])) # prediction = kmeans.predict(np.hstack([X_tfidf_clus, train_df.loc[:, ['negative', 'positive', # 'skip', 'speech',]].values])) # print(i, Counter(prediction).most_common()) # + from sklearn.cluster import KMeans from collections import Counter cluster_count = 19 model = KMeans(n_clusters=cluster_count, random_state=0).fit(np.hstack([X_tfidf_clus, train_df.loc[:, ['negative', 'positive', 'skip', 'speech',]].values])) model_prediction = model.predict(np.hstack([X_tfidf_clus, train_df.loc[:, ['negative', 'positive', 'skip', 'speech',]].values])) print(Counter(model_prediction).most_common()) # - train_df['cluster_id'] = model_prediction train_df.to_excel('excel/insta_cluster_posts.xlsx') cluster_text = {} for i in range(cluster_count): cluster_text[i] = ' '.join(train_df[train_df.cluster_id == i].post_text.values) cluster_text.keys() # + tfidf_clus_top = TfidfVectorizer(stop_words=[stemming(word) for word in stopwords.words('russian')]+[str(i) for i in range(200)]) tfidf_clus_top_arr = tfidf_clus_top.fit_transform([lemmatizing(clusses) for clusses in list(cluster_text.values())]).toarray() vocab = tfidf_clus_top.vocabulary_ reverse_vocab = {v:k for k,v in vocab.items()} feature_names = tfidf_clus_top.get_feature_names() # + idx = (-tfidf_clus_top_arr).argsort(axis=1) tfidf_max10 = idx[:, :100] tfidf_weight = -np.sort(-tfidf_clus_top_arr, axis=1)[:, :100] df_tfidf_c = pd.DataFrame([[reverse_vocab.get(item) for item in row] for row in tfidf_max10]).T df_tfidf_c.index = ['top_' + str(i+1) for i in range(100)] df_tfidf_c.columns = ['cluster_' + str(i+1) for i in range(cluster_count)] df_tfidf_c = df_tfidf_c.T df_tfidf_c[['weight_' + str(i+1) for 
i in range(100)]] = tfidf_weight df_tfidf_c = df_tfidf_c[np.array([[df_tfidf_c.columns[i], df_tfidf_c.columns[i+100]] for i in range(100)]).reshape(100*2)] df_tfidf_c # + active="" # pd.DataFrame(np.dstack([np.array([[i]*100 for i in df_tfidf_c.index.tolist()]).reshape((19, 100, 1)), # df_tfidf_c.values.reshape((19, 100, 2))]).reshape((1900, 3)), columns=['cluster_id', 'word', 'weight']).to_excel('excel/insta_3dclusters.xlsx') # - df_tfidf_c.to_excel('excel/insta_cluster_top100weights.xlsx') last_otchet = results.drop_duplicates('media_id')[['media_id', 'media_type', 'comment_count', 'media_like_count', 'media_created_at', 'post_text', 'negative_prob', 'positive_prob']] last_otchet = last_otchet.set_index('media_id') last_otchet last_otchet['cluster_label'] = train_df.cluster_id last_otchet['top10keywords'] = df_tfidf.T.apply(lambda x: ', '.join(x)) last_otchet['cluster_top100keywords'] = last_otchet.cluster_label.apply(lambda x: df_tfidf_c.T.apply(lambda x: ', '.join(x))[x]) last_otchet # + from datetime import datetime last_otchet = pd.read_excel('excel/instagram_media.xlsx') last_otchet.media_created_at = last_otchet.media_created_at.apply(lambda x: datetime.utcfromtimestamp(x).strftime('%d.%m.%Y %H:%M:%S')) last_otchet # - last_otchet.to_excel('excel/instagram_media.xlsx')
inst_sentiment.ipynb
But how did Michele know that this transaction was suspicious?
Let's begin: # + import pandas as pd import numpy as np import pickle import matplotlib.pyplot as plt from scipy import stats import tensorflow as tf import seaborn as sns from matplotlib import rcParams from sklearn.model_selection import train_test_split from keras.models import Model, load_model from keras.layers import Input, Dense from keras.callbacks import ModelCheckpoint, TensorBoard from keras import regularizers # %matplotlib inline sns.set(style='whitegrid', palette='muted', font_scale=1.5) rcParams['figure.figsize'] = 14, 8 RANDOM_SEED = 42 LABELS = ["Normal", "Fraud"] # - # # Loading the data # # The dataset we're going to use can be downloaded from [Kaggle](https://www.kaggle.com/dalpozz/creditcardfraud). It contains data about credit card transactions that occurred during a period of two days, with 492 frauds out of 284,807 transactions. # # All variables in the dataset are numerical. The data has been transformed using PCA transformation(s) due to privacy reasons. The two features that haven't been changed are Time and Amount. Time contains the seconds elapsed between each transaction and the first transaction in the dataset. df = pd.read_csv("data/creditcard.csv") # # Exploration df.shape # 31 columns, 2 of which are Time and Amount. The rest are output from the PCA transformation. Let's check for missing values: df.isnull().values.any() count_classes = pd.value_counts(df['Class'], sort = True) count_classes.plot(kind = 'bar', rot=0) plt.title("Transaction class distribution") plt.xticks(range(2), LABELS) plt.xlabel("Class") plt.ylabel("Frequency"); # We have a highly imbalanced dataset on our hands. Normal transactions overwhelm the fraudulent ones by a large margin. Let's look at the two types of transactions: frauds = df[df.Class == 1] normal = df[df.Class == 0] frauds.shape normal.shape # How different are the amount of money used in different transaction classes? 
frauds.Amount.describe() normal.Amount.describe() # Let's have a more graphical representation: # + f, (ax1, ax2) = plt.subplots(2, 1, sharex=True) f.suptitle('Amount per transaction by class') bins = 50 ax1.hist(frauds.Amount, bins = bins) ax1.set_title('Fraud') ax2.hist(normal.Amount, bins = bins) ax2.set_title('Normal') plt.xlabel('Amount ($)') plt.ylabel('Number of Transactions') plt.xlim((0, 20000)) plt.yscale('log') plt.show(); # - # Do fraudulent transactions occur more often during certain time? # + f, (ax1, ax2) = plt.subplots(2, 1, sharex=True) f.suptitle('Time of transaction vs Amount by class') ax1.scatter(frauds.Time, frauds.Amount) ax1.set_title('Fraud') ax2.scatter(normal.Time, normal.Amount) ax2.set_title('Normal') plt.xlabel('Time (in Seconds)') plt.ylabel('Amount') plt.show() # - # Doesn't seem like the time of transaction really matters. # # # Autoencoders # # Autoencoders can seem quite bizarre at first. The job of those models is to predict the input, given that same input. Puzzling? Definitely was for me, the first time I heard it. # # More specifically, let’s take a look at Autoencoder Neural Networks. This autoencoder tries to learn to approximate the following identity function: # # $$\textstyle f_{W,b}(x) \approx x$$ # # While trying to do just that might sound trivial at first, it is important to note that we want to learn a compressed representation of the data, thus find structure. This can be done by limiting the number of hidden units in the model. Those kind of autoencoders are called *undercomplete*. # # Here's a visual representation of what an Autoencoder might learn: # # ![](http://curiousily.com/assets/12.what_to_do_when_data_is_missing_part_ii_files/mushroom_encoder.png) # # ## Reconstruction error # # We optimize the parameters of our Autoencoder model in such way that a special kind of error - reconstruction error is minimized. 
# In practice, the traditional squared error is often used:
#
# $$\textstyle L(x,x') = ||\, x - x'||^2$$
#
# If you want to learn more about Autoencoders I highly recommend the following videos by <NAME>:
#
# <iframe width="100%" height="480" src="https://www.youtube.com/embed/FzS3tMl4Nsc" frameborder="0" allowfullscreen></iframe>

# # Preparing the data
#
# First, let's drop the Time column (not going to use it) and use the scikit's StandardScaler on the Amount. The scaler removes the mean and scales the values to unit variance:

# +
from sklearn.preprocessing import StandardScaler

# Keep all PCA components plus Amount; Time is dropped (see the scatter
# plots above — time of transaction showed no obvious signal).
data = df.drop(['Time'], axis=1)
# StandardScaler expects a 2-D array, hence the reshape(-1, 1).
data['Amount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
# -

# Training our Autoencoder is gonna be a bit different from what we are used to. Let's say you have a dataset containing a lot of non fraudulent transactions at hand. You want to detect any anomaly on new transactions. We will create this situation by training our model on the normal transactions, only. Reserving the correct class on the test set will give us a way to evaluate the performance of our model. We will reserve 20% of our data for testing:

# +
X_train, X_test = train_test_split(data, test_size=0.2, random_state=RANDOM_SEED)
# The autoencoder is fit on NORMAL transactions only: fraud rows are removed
# from the training split so the model learns what "normal" looks like.
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)

# The test split keeps its labels (y_test) so reconstruction error can be
# compared against the true class during evaluation.
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)

X_train = X_train.values
X_test = X_test.values
# -

X_train.shape

# # Building the model
#
# Our Autoencoder uses 4 fully connected layers with 14, 7, 7 and 29 neurons respectively. The first two layers are used for our encoder, the last two go for the decoder.
# Additionally, L1 regularization will be used during training:

# Input dimensionality = number of feature columns in the prepared data.
input_dim = X_train.shape[1]
# Width of the first encoder layer (the bottleneck narrows to 7 below).
encoding_dim = 14

# +
input_layer = Input(shape=(input_dim, ))

# NOTE(review): 10e-5 equals 1e-4 — if 1e-5 was intended, the L1 activity
# penalty is 10x stronger than expected. TODO confirm.
encoder = Dense(encoding_dim, activation="tanh",
                activity_regularizer=regularizers.l1(10e-5))(input_layer)
encoder = Dense(int(encoding_dim / 2), activation="relu")(encoder)

decoder = Dense(int(encoding_dim / 2), activation='tanh')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)

autoencoder = Model(inputs=input_layer, outputs=decoder)
# -

# Let's train our model for 100 epochs with a batch size of 32 samples and save the best performing model to a file. The ModelCheckpoint provided by Keras is really handy for such tasks. Additionally, the training progress will be exported in a format that TensorBoard understands.

# +
nb_epoch = 20 #100
batch_size = 32

autoencoder.compile(optimizer='adam',
                    loss='mean_squared_error',
                    metrics=['accuracy'])

# Persist only the best (lowest validation loss) weights seen during training.
checkpointer = ModelCheckpoint(filepath="model.h5",
                               verbose=0,
                               save_best_only=True)
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)

# Targets == inputs: the autoencoder learns to reconstruct X_train itself.
history = autoencoder.fit(X_train, X_train,
                          epochs=nb_epoch,
                          batch_size=batch_size,
                          shuffle=True,
                          validation_data=(X_test, X_test),
                          verbose=1,
                          callbacks=[checkpointer, tensorboard]).history
# -

# Reload the checkpointed best model rather than keeping the last epoch's weights.
autoencoder = load_model('model.h5')

# # Evaluation

plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right');

# The reconstruction error on our training and test data seems to converge nicely. Is it low enough?
# Let's have a closer look at the error distribution:

predictions = autoencoder.predict(X_test)

# Per-sample reconstruction MSE: mean squared difference across all features.
mse = np.mean(np.power(X_test - predictions, 2), axis=1)
error_df = pd.DataFrame({'reconstruction_error': mse,
                         'true_class': y_test})

error_df.describe()

# ## Reconstruction error without fraud

fig = plt.figure()
ax = fig.add_subplot(111)
# Errors >= 10 are excluded so the bulk of the normal distribution is visible.
normal_error_df = error_df[(error_df['true_class']== 0) & (error_df['reconstruction_error'] < 10)]
_ = ax.hist(normal_error_df.reconstruction_error.values, bins=10)

# ## Reconstruction error with fraud

fig = plt.figure()
ax = fig.add_subplot(111)
fraud_error_df = error_df[error_df['true_class'] == 1]
_ = ax.hist(fraud_error_df.reconstruction_error.values, bins=10)

from sklearn.metrics import (confusion_matrix, precision_recall_curve, auc,
                             roc_curve, recall_score, classification_report, f1_score,
                             precision_recall_fscore_support)

# ROC curves are very useful tool for understanding the performance of binary classifiers. However, our case is a bit out of the ordinary. We have a very imbalanced dataset. Nonetheless, let's have a look at our ROC curve:

# +
# Drop NaN values
error_df.dropna(inplace=True)

# The reconstruction error itself is the anomaly score fed to the ROC curve.
fpr, tpr, thresholds = roc_curve(error_df.true_class, error_df.reconstruction_error)
roc_auc = auc(fpr, tpr)

plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, label='AUC = %0.4f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.001, 1])
plt.ylim([0, 1.001])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show();
# -

# The ROC curve plots the true positive rate versus the false positive rate, over different threshold values. Basically, we want the blue line to be as close as possible to the upper left corner. While our results look pretty good, we have to keep in mind of the nature of our dataset. ROC doesn't look very useful for us. Onward...
# # ## Precision vs Recall # # <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/2/26/Precisionrecall.svg/350px-Precisionrecall.svg.png" /> # # Precision and recall are defined as follows: # # $$\text{Precision} = \frac{\text{true positives}}{\text{true positives} + \text{false positives}}$$ # # $$\text{Recall} = \frac{\text{true positives}}{\text{true positives} + \text{false negatives}}$$ # # Let's take an example from Information Retrieval in order to better understand what precision and recall are. Precision measures the relevancy of obtained results. Recall, on the other hand, measures how many relevant results are returned. Both values can take values between 0 and 1. You would love to have a system with both values being equal to 1. # # Let's return to our example from Information Retrieval. High recall but low precision means many results, most of which has low or no relevancy. When precision is high but recall is low we have the opposite - few returned results with very high relevancy. Ideally, you would want high precision and high recall - many results with that are highly relevant. precision, recall, th = precision_recall_curve(error_df.true_class, error_df.reconstruction_error) plt.plot(recall, precision, 'b', label='Precision-Recall curve') plt.title('Recall vs Precision') plt.xlabel('Recall') plt.ylabel('Precision') plt.show() # A high area under the curve represents both high recall and high precision, where high precision relates to a low false positive rate, and high recall relates to a low false negative rate. High scores for both show that the classifier is returning accurate results (high precision), as well as returning a majority of all positive results (high recall). plt.plot(th, precision[1:], 'b', label='Threshold-Precision curve') plt.title('Precision for different threshold values') plt.xlabel('Threshold') plt.ylabel('Precision') plt.show() # You can see that as the reconstruction error increases our precision rises as well. 
# Let's have a look at the recall:

plt.plot(th, recall[1:], 'b', label='Threshold-Recall curve')
plt.title('Recall for different threshold values')
plt.xlabel('Reconstruction error')
plt.ylabel('Recall')
plt.show()

# Here, we have the exact opposite situation. As the reconstruction error increases the recall decreases.
#
# # Prediction
#
# Our model is a bit different this time. It doesn't know how to predict new values. But we don't need that. In order to predict whether or not a new/unseen transaction is normal or fraudulent, we'll calculate the reconstruction error from the transaction data itself. If the error is larger than a predefined threshold, we'll mark it as a fraud (since our model should have a low error on normal transactions). Let's pick that value:

# Decision boundary on the reconstruction error; presumably chosen by eye
# from the threshold/precision/recall plots above — TODO confirm.
threshold = 2.9

# And see how well we're dividing the two types of transactions:

# +
groups = error_df.groupby('true_class')
fig, ax = plt.subplots()

for name, group in groups:
    ax.plot(group.index, group.reconstruction_error, marker='o', ms=3.5, linestyle='',
            label= "Fraud" if name == 1 else "Normal")
ax.hlines(threshold, ax.get_xlim()[0], ax.get_xlim()[1], colors="r", zorder=100, label='Threshold')
ax.legend()
plt.title("Reconstruction error for different classes")
plt.ylabel("Reconstruction error")
plt.xlabel("Data point index")
plt.show();
# -

# I know, that chart might be a bit deceiving. Let's have a look at the confusion matrix:

# +
# A transaction is flagged as fraud iff its reconstruction error exceeds the threshold.
y_pred = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]
conf_matrix = confusion_matrix(error_df.true_class, y_pred)

plt.figure(figsize=(12, 12))
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d");
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
# -

# Our model seems to catch a lot of the fraudulent cases. Of course, there is a catch (see what I did there?). The number of normal transactions classified as frauds is really high. Is this really a problem?
Probably it is. You might want to increase or decrease the value of the threshold, depending on the problem. That one is up to you. # # # Conclusion # # We've created a very simple Deep Autoencoder in Keras that can reconstruct what non fraudulent transactions looks like. Initially, I was a bit skeptical about whether or not this whole thing is gonna work out, bit it kinda did. Think about it, we gave a lot of one-class examples (normal transactions) to a model and it learned (somewhat) how to discriminate whether or not new examples belong to that same class. Isn't that cool? Our dataset was kind of magical, though. We really don't know what the original features look like. # # Keras gave us very clean and easy to use API to build a non-trivial Deep Autoencoder. You can search for TensorFlow implementations and see for yourself how much boilerplate you need in order to train one. Can you apply a similar model to a different problem? # # # References # # - [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html) # - [Stanford tutorial on Autoencoders](http://ufldl.stanford.edu/tutorial/unsupervised/Autoencoders/) # - [Stacked Autoencoders in TensorFlow](http://cmgreen.io/2016/01/04/tensorflow_deep_autoencoder.html)
fraud_detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="RsR6wyMG7x_L" colab_type="text"
# ### author: <EMAIL>
# > 0.40 Dropout, Augmentation, Histogram Equalization pre-processing

# + id="B207nogV834C" colab_type="code" colab={}
import os

# cleaning up unimportant files
def del_file(f_name):
    """Delete *f_name*, printing a notice when the file does not exist.

    Fix: the original bare ``except:`` swallowed *every* exception (including
    permission errors and KeyboardInterrupt) and mislabelled them all as
    'file not found'; only FileNotFoundError is treated as best-effort now.
    """
    try:
        os.remove(f_name)
    except FileNotFoundError:
        print('file not found')

# + id="_OuVb1eD9CgJ" colab_type="code" outputId="9ebea7fc-e6f8-412e-8f3f-2e054e091230" colab={"base_uri": "https://localhost:8080/", "height": 53}
# %cd /content/
# ! ls

# + id="7IBel4C17Mmn" colab_type="code" outputId="e098ecbc-4b83-481d-ccc7-29dbb0c55f8f" colab={"base_uri": "https://localhost:8080/", "height": 53}
# get the dataset
# !wget https://challenge.kitware.com/api/v1/item/5ac37a9d56357d4ff856e176/download

# + id="091AlZxi8CmT" colab_type="code" outputId="6afc8048-9d8d-494a-f5e4-dcb020693b40" colab={"base_uri": "https://localhost:8080/", "height": 35}
# !ls

# + id="AQfh295Mfhf5" colab_type="code" outputId="7bd0eedc-c1bb-442b-ff79-92f6a2c69d63" colab={"base_uri": "https://localhost:8080/", "height": 35}
del_file('download.2')

# + id="JwTZqwwdfnaT" colab_type="code" outputId="b4a99045-9d48-41bf-c8b9-b80b75877a39" colab={"base_uri": "https://localhost:8080/", "height": 35}
# !ls

# + id="EqOhKHQN8Ykc" colab_type="code" colab={}
# Read the wget download log. Fix: the original `open(...)` was never closed
# (handle leak); `with` guarantees closure.
with open('wget-log', 'r') as f:
    log_c = f.read()
# print(log_c) # download history

# + id="jdR7RWBi-job" colab_type="code" colab={}
# unzip to train_imgs folder
import zipfile
zip_ref = zipfile.ZipFile('download', 'r')
zip_ref.extractall('train_imgs')
zip_ref.close()

# + id="zGHjmxQA_Buc" colab_type="code" outputId="b26905f3-7885-41e5-a5a0-cb30296c0de5" colab={"base_uri": "https://localhost:8080/", "height": 35}
# !ls

# + id="XxQfDjFz_ljU" colab_type="code" outputId="e2a07dc8-54d5-4965-d25b-3adb6956771d" colab={"base_uri":
"https://localhost:8080/", "height": 35} print(os.stat('download').st_size/1000000000) # GigaBytes (approx) # + id="0Fw6qhY3_u7m" colab_type="code" outputId="c81a4648-3e9b-4cf1-f083-52a2d7b14d9b" colab={"base_uri": "https://localhost:8080/", "height": 53} # get the segmentation masks # !wget https://challenge.kitware.com/api/v1/item/5ac3695656357d4ff856e16a/download # + id="YIF19STu_zWV" colab_type="code" colab={} zip_ref = zipfile.ZipFile('download.1', 'r') zip_ref.extractall('train_masks') zip_ref.close() # + id="zaMP_8sO_4VO" colab_type="code" outputId="a6aca3b2-c6fc-4bc8-9111-2dac4fb9bac6" colab={"base_uri": "https://localhost:8080/", "height": 35} # %cd /content/ # + id="HlyJQOZY-JTs" colab_type="code" outputId="f1b6bb3d-41b2-4654-b832-e0d165e3221f" colab={"base_uri": "https://localhost:8080/", "height": 53} # !ls # + id="5QDlK4In-X_F" colab_type="code" colab={} # use it carefully import shutil try: #shutil.rmtree('/content/train_masks') #shutil.rmtree('/content/train_imgs') #shutil.rmtree('/content/wget-log') #shutil.rmtree('/content/wget-log.1') pass except: print('folder not found') # + id="63G0NlrCADTf" colab_type="code" outputId="7ea0d156-346a-40ae-cf4c-471328a58a23" colab={"base_uri": "https://localhost:8080/", "height": 35} print(os.stat('download.1').st_size/1000000000) # GigaBytes (approx) # + id="rJHTFPshAQRm" colab_type="code" colab={} # To reproduce the results later from numpy.random import seed seed(1997) from tensorflow import set_random_seed set_random_seed(1997) # + id="AQQceuUGA5YM" colab_type="code" colab={} # load the images and the masks import glob img_files = [] for files in glob.glob('/content/train_imgs/ISIC2018_Task1-2_Training_Input/*.jpg'): img_files.append(files) mask_files = [] for files in glob.glob('/content/train_masks/ISIC2018_Task1_Training_GroundTruth/*.png'): mask_files.append(files) # + id="UtfwcD58rJvy" colab_type="code" colab={} img_files.sort() mask_files.sort() # + id="ZqwcK-tpBixW" colab_type="code" 
outputId="fb2a6ce5-c6bd-45ab-be22-295368709967" colab={"base_uri": "https://localhost:8080/", "height": 90} print(len(img_files)) print(len(mask_files)) print(img_files[0]) print(mask_files[0]) # + id="sSB74eBDpjFg" colab_type="code" outputId="1680283b-cccf-41f8-f324-fabc09627cc6" colab={"base_uri": "https://localhost:8080/", "height": 72} img_id = 2531 old_name = mask_files[img_id] print(old_name) new_name = mask_files[img_id][:-17] + '.png' print(new_name) print(img_files[img_id]) # + id="1EAa99KapIDM" colab_type="code" colab={} # rename all the mask images, exclude _segmentation part, Augmentor for old_name in mask_files: new_name = old_name[:-17] + '.png' os.rename(old_name,new_name) # + id="vis1q1yG6XsS" colab_type="code" colab={} # rename image files to .png -> same extension -> augmentor for old_name in img_files: new_name = old_name[:-4] + '.png' os.rename(old_name,new_name) # + id="5ZF8TN52r5FK" colab_type="code" colab={} # load the images and the masks import glob img_files = [] for files in glob.glob('/content/train_imgs/ISIC2018_Task1-2_Training_Input/*.png'): img_files.append(files) mask_files = [] for files in glob.glob('/content/train_masks/ISIC2018_Task1_Training_GroundTruth/*.png'): mask_files.append(files) # + id="3aWOY58nsCjn" colab_type="code" colab={} img_files.sort() mask_files.sort() # + id="91DlRYRgr9a9" colab_type="code" outputId="62890649-c429-46cb-936d-7eb22314acbf" colab={"base_uri": "https://localhost:8080/", "height": 90} print(len(img_files)) print(len(mask_files)) print(img_files[527]) print(mask_files[527]) # + id="PZAb2B_xgAjD" colab_type="code" colab={} from PIL import Image import os for files in glob.glob('/content/train_imgs/ISIC2018_Task1-2_Training_Input/*.png'): #img_files.append(files) im = Image.open(files) f, e = os.path.splitext(files) imResize = im.resize((224,224), Image.ANTIALIAS) imResize.save(f + '.png', 'PNG', quality=100) for files in glob.glob('/content/train_masks/ISIC2018_Task1_Training_GroundTruth/*.png'): 
#mask_files.append(files) im = Image.open(files) f, e = os.path.splitext(files) imResize = im.resize((224,224), Image.ANTIALIAS) imResize.save(f + '.png', 'PNG', quality=100) # + id="ehAdAEX7Hl2_" colab_type="code" colab={} import glob img_files = [] for files in glob.glob('/content/train_imgs/ISIC2018_Task1-2_Training_Input/*.png'): img_files.append(files) mask_files = [] for files in glob.glob('/content/train_masks/ISIC2018_Task1_Training_GroundTruth/*.png'): mask_files.append(files) # + id="6qvtq69_fptW" colab_type="code" colab={} img_files.sort() mask_files.sort() # + id="qp6-oJFJHps7" colab_type="code" outputId="6b842484-0de5-428f-855c-a3ca31ac504a" colab={"base_uri": "https://localhost:8080/", "height": 90} print(len(img_files)) print(len(mask_files)) print(img_files[2593]) print(mask_files[2593]) # + id="ePC_Zej0HtTZ" colab_type="code" outputId="4f7a6cbe-ac52-44cb-fb36-c7b1a7891947" colab={"base_uri": "https://localhost:8080/", "height": 53} import cv2 a = cv2.imread('/content/train_imgs/ISIC2018_Task1-2_Training_Input/ISIC_0001103.png') print(a.shape) a = cv2.imread('/content/train_masks/ISIC2018_Task1_Training_GroundTruth/ISIC_0001103.png') print(a.shape) # + id="pvjGbmEcsnmN" colab_type="code" outputId="efd6f8e3-5f05-435f-e6d5-bc50b4f79a20" colab={"base_uri": "https://localhost:8080/", "height": 201} # install augmentation library # ! pip install Augmentor # + id="n8fJwk-a488h" colab_type="code" outputId="ca325c77-fd6f-47a2-ae39-61dac25af093" colab={"base_uri": "https://localhost:8080/", "height": 35} import shutil try: shutil.rmtree('/content/train_imgs/ISIC2018_Task1-2_Training_Input/output') except: print('folder not found') # + id="O8wepO9BoBck" colab_type="code" outputId="947b61cf-201d-497e-d44a-1b7858441509" colab={"base_uri": "https://localhost:8080/", "height": 53} # ! 
ls # + id="tRjA70Gxtcs6" colab_type="code" outputId="4814297e-8bab-4262-facf-1a0b59351d90" colab={"base_uri": "https://localhost:8080/", "height": 92} import Augmentor p = Augmentor.Pipeline("/content/train_imgs/ISIC2018_Task1-2_Training_Input/") # Point to a directory containing ground truth data. # Images with the same file names will be added as ground truth data # and augmented in parallel to the original data. p.ground_truth("/content/train_masks/ISIC2018_Task1_Training_GroundTruth/") # Add operations to the pipeline as normal: p.rotate(probability=0.2, max_left_rotation=5, max_right_rotation=5) p.flip_left_right(probability=0.2) p.zoom_random(probability=0.1, percentage_area=0.8) p.flip_top_bottom(probability=0.3) p.gaussian_distortion(probability=0.05, grid_width=4, grid_height=4, magnitude=3, corner='bell', method='in', mex=0.5, mey=0.5, sdx=0.05, sdy=0.05) p.random_brightness(probability=0.05, min_factor=0.7, max_factor=1.3) p.random_color(probability=0.05, min_factor=0.6, max_factor=0.9) p.random_contrast(probability=0.05, min_factor=0.6, max_factor=0.9) p.random_distortion(probability=0.2, grid_width=4, grid_height=4, magnitude=2) p.sample(2500) # change this number to generate different number of augmented variations # + id="KhMx3rvYnl6J" colab_type="code" outputId="d63b1280-fdf4-4227-c09e-95e0f6c6b12a" colab={"base_uri": "https://localhost:8080/", "height": 35} % cd /content # + id="i6atKaOfezCG" colab_type="code" outputId="4e7ebca9-bd55-4427-94e4-99b79e87e9b4" colab={"base_uri": "https://localhost:8080/", "height": 53} # ! ls # + id="bu2_nks5Otvl" colab_type="code" colab={} # % cd /content/train_imgs/ISIC2018_Task1-2_Training_Input/output # # ! 
# !ls

# + id="bVsOhQH6iISE" colab_type="code" outputId="961a0aed-8030-46ec-a582-917563b5b374" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Length of the Augmentor output-directory prefix; used as an index below.
len('/content/train_imgs/ISIC2018_Task1-2_Training_Input/output/')

# + id="uQG5XeY3grPt" colab_type="code" colab={}
# Split Augmentor's mixed output folder into image files vs. mask files.
aug_imgs = []
aug_masks = []
aug_path = '/content/train_imgs/ISIC2018_Task1-2_Training_Input/output/*.png'
for fnam in glob.glob(aug_path):
    # print(fnam)
    # NOTE(review): index 59 is the first character after the output/ prefix
    # measured above; mask filenames presumably start with '_' there. This is
    # fragile — breaks if the base path changes. TODO confirm.
    if(fnam[59]=='_'): # find the proper index
        aug_masks.append(fnam)
    else:
        aug_imgs.append(fnam)

# + id="Vqcn-ovjhXeh" colab_type="code" outputId="d274de02-c850-49d6-e8b3-fea0cf17d08c" colab={"base_uri": "https://localhost:8080/", "height": 110}
# Sort both lists so images and masks pair up by filename order.
aug_imgs.sort()
aug_masks.sort()
print(len(aug_imgs))
print(len(aug_masks))
print(aug_imgs[7])
print(aug_masks[7])

# + id="HJNI8jcTBnQP" colab_type="code" outputId="c42f2bb0-d080-4a73-9a75-36e77a0906b5" colab={"base_uri": "https://localhost:8080/", "height": 35}
# reference: https://github.com/ZFTurbo/ZF_UNET_224_Pretrained_Model
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import SpatialDropout2D, Activation
from keras import backend as K
from keras.layers.merge import concatenate
from keras.utils.data_utils import get_file

# Number of image channels (for example 3 in case of RGB, or 1 for grayscale images)
INPUT_CHANNELS = 3
# Number of output masks (1 in case you predict only one type of objects)
OUTPUT_MASK_CHANNELS = 1

# Pretrained weights

def preprocess_input(x):
    """Scale pixel values in-place from [0, 255] to [0, 1] and return *x*."""
    x /= 255.
    # not sure about the mean of the image distribution
    return x

def dice_coef(y_true, y_pred):
    """Soft Dice coefficient over flattened masks (in [0, 1], higher is better)."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # +1 added to avoid 0/0 division
    return (2.0 * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)

def jacard_coef(y_true, y_pred):
    """Soft Jaccard (IoU) coefficient with the same +1 smoothing as dice_coef."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0)

def jacard_coef_loss(y_true, y_pred):
    # Negated coefficient: maximizing IoU == minimizing this loss.
    return -jacard_coef(y_true, y_pred)

def dice_coef_loss(y_true, y_pred):
    # Negated coefficient: maximizing Dice == minimizing this loss.
    return -dice_coef(y_true, y_pred)

def double_conv_layer(x, size, dropout=0.40, batch_norm=True):
    """Two 3x3 same-padding convolutions (optional BatchNorm, ReLU each),
    followed by SpatialDropout2D when ``dropout`` > 0."""
    # BatchNorm axis depends on the backend's channel ordering.
    if K.image_dim_ordering() == 'th':
        axis = 1
    else:
        axis = 3
    conv = Conv2D(size, (3, 3), padding='same')(x)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    conv = Conv2D(size, (3, 3), padding='same')(conv)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = SpatialDropout2D(dropout)(conv)
    return conv

def UNET_224(dropout_val=0.40, weights=None):
    """Build a 224x224 U-Net for single-mask segmentation.

    NOTE(review): ``weights`` is accepted but never used inside the function,
    and the default dropout is 0.40 (the original comment claimed "no dropout
    by default") — TODO confirm intent.
    """
    if K.image_dim_ordering() == 'th':
        inputs = Input((INPUT_CHANNELS, 224, 224))
        axis = 1
    else:
        inputs = Input((224, 224, INPUT_CHANNELS))
        axis = 3
    filters = 32

    # Encoder: five double-conv blocks; each 2x2 max-pool halves resolution
    # (224 -> 112 -> 56 -> 28 -> 14 -> 7) while filters double.
    conv_224 = double_conv_layer(inputs, filters)
    pool_112 = MaxPooling2D(pool_size=(2, 2))(conv_224)

    conv_112 = double_conv_layer(pool_112, 2*filters)
    pool_56 = MaxPooling2D(pool_size=(2, 2))(conv_112)

    conv_56 = double_conv_layer(pool_56, 4*filters)
    pool_28 = MaxPooling2D(pool_size=(2, 2))(conv_56)

    conv_28 = double_conv_layer(pool_28, 8*filters)
    pool_14 = MaxPooling2D(pool_size=(2, 2))(conv_28)

    conv_14 = double_conv_layer(pool_14, 16*filters)
    pool_7 = MaxPooling2D(pool_size=(2, 2))(conv_14)

    conv_7 = double_conv_layer(pool_7, 32*filters)

    # Decoder: upsample and concatenate the matching encoder feature map
    # (skip connections), then double-conv.
    up_14 = concatenate([UpSampling2D(size=(2, 2))(conv_7), conv_14], axis=axis)
    up_conv_14 = double_conv_layer(up_14, 16*filters)

    up_28 = concatenate([UpSampling2D(size=(2, 2))(up_conv_14), conv_28], axis=axis)
    up_conv_28 = double_conv_layer(up_28, 8*filters)

    up_56 = concatenate([UpSampling2D(size=(2, 2))(up_conv_28), conv_56], axis=axis)
    up_conv_56 = double_conv_layer(up_56, 4*filters)

    up_112 = concatenate([UpSampling2D(size=(2, 2))(up_conv_56), conv_112], axis=axis)
    up_conv_112 = double_conv_layer(up_112, 2*filters)

    up_224 = concatenate([UpSampling2D(size=(2, 2))(up_conv_112), conv_224], axis=axis)
    # dropout_val is applied only on this final decoder block.
    up_conv_224 = double_conv_layer(up_224, filters, dropout_val)

    # 1x1 convolution + sigmoid -> per-pixel probability for the output mask.
    conv_final = Conv2D(OUTPUT_MASK_CHANNELS, (1, 1))(up_conv_224)
    conv_final = Activation('sigmoid')(conv_final)

    model = Model(inputs, conv_final, name="UNET_224")
    return model

# + id="gPqfE2nEVmDW" colab_type="code" colab={}


# + id="9BzTNzeUqhAn" colab_type="code" colab={}
#img_files.extend(aug_imgs)
#mask_files.extend(aug_masks)

# + id="wYKOFLToqyi9" colab_type="code" outputId="bdfe874f-1af9-4dd1-efc9-c415a00182d3" colab={"base_uri": "https://localhost:8080/", "height": 90}
print(len(img_files))
print(len(mask_files))
print(img_files[2000])
print(mask_files[2000])

# + id="ev0U6jYGEf7l" colab_type="code" outputId="d45a3a8f-772e-486c-e4d3-64bdb587e1c9" colab={"base_uri": "https://localhost:8080/", "height": 35}
import cv2
import random
import numpy as np
import pandas as pd
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras import __version__

img_files.sort()
mask_files.sort()
print('Updated States ...')
img_arr = []
mask_arr = []
num_train = len(img_files)
# NOTE(review): `global` at module level is a no-op; gen_batch is only
# meaningful inside a function body.
global gen_batch

# + id="cWgZy-nEFLL3" colab_type="code" outputId="dbd3bee5-46fa-4604-fa9e-842a09d665a5" colab={"base_uri": "https://localhost:8080/", "height": 53}
# !
# !ls

# + id="617NrhqSZ9oL" colab_type="code" colab={}
import random
random.seed(1997) # same seed again

# Load every original image (BGR via cv2) and its grayscale mask, resized to
# the network's 224x224 input size.
for i in range(num_train):
    a = cv2.imread(img_files[i])
    a = cv2.resize(a, (224, 224))
    a = np.array(a,dtype=np.uint8)
    img_arr.append(a)
    b = cv2.imread(mask_files[i],0)
    b = cv2.resize(b, (224, 224))
    b = np.array(b,dtype=np.uint8)
    mask_arr.append(b)

gen_batch = 0
# Shuffle images and masks together so pairs stay aligned.
fin_arr = list(zip(img_arr,mask_arr))
random.shuffle(fin_arr)
img_arr = [x[0] for x in fin_arr]
mask_arr = [x[1] for x in fin_arr]

# + id="2Xh_BDZTb60g" colab_type="code" outputId="315be9ba-3c6d-4fc9-d579-b4f0765f22c4" colab={"base_uri": "https://localhost:8080/", "height": 144}
tot_data = len(img_arr)
print(tot_data)

# 70 / 15 / 15 split; the *_split variables are cumulative slice boundaries.
train_split = 0.7
validation_split = 0.7 + 0.15
test_split = 0.7 + 0.15 + 0.15

train_img = img_arr[:int(tot_data*train_split)]
validation_img = img_arr[int(tot_data*train_split):int(tot_data*validation_split)]
test_img = img_arr[int(tot_data*validation_split):]

train_mask = mask_arr[:int(tot_data*train_split)]
validation_mask = mask_arr[int(tot_data*train_split):int(tot_data*validation_split)]
test_mask = mask_arr[int(tot_data*validation_split):]

print(len(train_img))
print(len(train_mask))
print(len(validation_img))
print(len(validation_mask))
print(len(test_img))
print(len(test_mask))

# + id="-ykUy60h-aKj" colab_type="code" outputId="731a4899-0f19-479b-ae33-83928661b3ae" colab={"base_uri": "https://localhost:8080/", "height": 35}
len(aug_imgs)

# + id="qRvsN7lu-jcA" colab_type="code" outputId="ed217d37-e11e-403b-efc1-4232fbf6d559" colab={"base_uri": "https://localhost:8080/", "height": 35}
len(aug_masks)

# + id="dZ_pAKo3-Pcz" colab_type="code" colab={}
# load augmented images
# img_arr / mask_arr are reused (rebound) here for the Augmentor output.
num_train = len(aug_imgs)
img_arr = []
mask_arr = []
for i in range(num_train):
    a = cv2.imread(aug_imgs[i])
    a = cv2.resize(a, (224, 224))
    a = np.array(a,dtype=np.uint8)
    img_arr.append(a)
    b = cv2.imread(aug_masks[i],0)
    b = cv2.resize(b, (224, 224))
    b = np.array(b,dtype=np.uint8)
    mask_arr.append(b)

gen_batch = 0
fin_arr = list(zip(img_arr,mask_arr))
random.shuffle(fin_arr)
img_arr = [x[0] for x in fin_arr]
mask_arr = [x[1] for x in fin_arr]

# + id="woWRERmC-5-W" colab_type="code" outputId="ffa240ba-f91b-4610-cd4d-f3a2b594d29b" colab={"base_uri": "https://localhost:8080/", "height": 90}
print(len(img_arr))
print(len(mask_arr))
print(img_arr[0].shape)
print(mask_arr[0].shape)

# + id="z2BUYED1_W5Y" colab_type="code" colab={}
# use it carefully, num_samples*0.8
# 80% of augmented samples go to training, 20% to validation.
# NOTE(review): augmented samples are variants of *training-pool* originals,
# so adding them to the validation split risks train/validation leakage —
# TODO confirm this is intended.
ns_t = int(len(aug_masks)*0.8)
train_img.extend(img_arr[0:ns_t])
train_mask.extend(mask_arr[0:ns_t])
validation_img.extend(img_arr[ns_t:])
validation_mask.extend(mask_arr[ns_t:])

# + id="OBO6Xti__8ih" colab_type="code" outputId="134f89a9-4803-47ad-c6f4-daa5cc4e587b" colab={"base_uri": "https://localhost:8080/", "height": 90}
print(len(train_img))
print(len(train_mask))
print(len(validation_img))
print(len(validation_mask))

# + id="dO_Xqh5cl7Q0" colab_type="code" colab={}
# Convert image lists to float32 arrays scaled to [0, 1].
train_img = np.array(train_img, dtype = 'float32')
train_img = preprocess_input(train_img)
validation_img = np.array(validation_img, dtype = 'float32')
validation_img = preprocess_input(validation_img)
test_img = np.array(test_img, dtype = 'float32')
test_img = preprocess_input(test_img)

# + id="E12UXFffmKtX" colab_type="code" outputId="01bc0fdb-55f6-46b7-9a09-149127770f1a" colab={"base_uri": "https://localhost:8080/", "height": 72}
print(np.max(train_img[0]))
print(np.max(validation_img[0]))
print(np.max(test_img[0]))

# + id="0XB4SosFm53Q" colab_type="code" colab={}
# Masks get the same [0, 1] scaling so they match the sigmoid output range.
train_mask = np.array(train_mask, dtype = 'float32')
train_mask = preprocess_input(train_mask)
validation_mask = np.array(validation_mask, dtype = 'float32')
validation_mask = preprocess_input(validation_mask)
test_mask = np.array(test_mask, dtype = 'float32')
test_mask = preprocess_input(test_mask)

# + id="3P6DfTVAnMEC" colab_type="code" outputId="d7e8ccea-1ce3-4f6e-e4f9-1ac3b7e82a8c" colab={"base_uri": "https://localhost:8080/", "height": 72}
print(np.max(train_mask[0]))
# (reformatted from whitespace-mangled notebook source; statements unchanged
# except the one bug fix flagged with BUGFIX below)
print(np.max(validation_mask[0]))
print(np.max(test_mask[0]))

# + cell: shapes of one sample
print(train_img[0].shape)
print(train_mask[0].shape)

# + cell: add the channel axis the network expects: (n, 224, 224) -> (n, 224, 224, 1)
print(train_mask.shape)
n_s, r, c = train_mask.shape
train_mask = train_mask.reshape(n_s, r, c, 1)
print(train_mask.shape)

print(validation_mask.shape)
n_s, r, c = validation_mask.shape
validation_mask = validation_mask.reshape(n_s, r, c, 1)
print(validation_mask.shape)

print(test_mask.shape)
n_s, r, c = test_mask.shape
test_mask = test_mask.reshape(n_s, r, c, 1)
print(test_mask.shape)

# + cell: build and compile the model (resumes from a saved checkpoint if present)
from keras.models import load_model
from keras.optimizers import Adam
import os

out_model_path = 'unet_224_final_model.h5'
epochs = 100
patience = 20
batch_size = 32
optim_type = 'Adam'
learning_rate = 0.001
model = UNET_224()
if os.path.isfile(out_model_path):
    model.load_weights(out_model_path)
if optim_type == 'SGD':
    # NOTE(review): SGD is never imported in this notebook -- this branch
    # would raise NameError if optim_type were ever set to 'SGD'.
    optim = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
else:
    optim = Adam(lr=learning_rate)
model.compile(optimizer=optim, loss=dice_coef_loss, metrics=[dice_coef, jacard_coef])
model.summary()

# + cell: same setup again (duplicate of the previous cell, kept because the
# notebook was re-run), then train with LR scheduling and checkpointing
from keras.models import load_model
import os

out_model_path = 'unet_224_final_model.h5'
epochs = 100
patience = 20
batch_size = 32
optim_type = 'Adam'
learning_rate = 0.001
model = UNET_224()
if os.path.isfile(out_model_path):
    model.load_weights(out_model_path)
if optim_type == 'SGD':
    optim = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
else:
    optim = Adam(lr=learning_rate)
model.compile(optimizer=optim, loss=dice_coef_loss, metrics=[dice_coef, jacard_coef])

callbacks = [
    # epsilon is the pre-Keras-2.x spelling of min_delta (env is pinned)
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-9,
                      epsilon=0.00001, verbose=1, mode='min'),
    #EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
    ModelCheckpoint('unet_224_best_weight.h5', monitor='val_loss',
                    save_best_only=True, verbose=1),
]
print('Training Init()')
history = model.fit(
    x = train_img,
    y = train_mask,
    batch_size = batch_size,
    epochs=epochs,
    #steps_per_epoch= 1815//batch_size,
    validation_data= (validation_img, validation_mask),
    #validation_steps= 389//16,
    verbose=1,
    callbacks=callbacks)
model.save(out_model_path)

type(history.history)

# + cell: persist the training history
model_hist = history.history
np.save('model.hist.npy', model_hist)

# !ls

from google.colab import files
files.download('model.hist.npy')

# + cell: predict on the held-out test set
y_test = model.predict(test_img)

print(y_test.shape)


def dice_coef_eval(y_true, y_pred):
    """Dice coefficient between two flattened binary masks (+1 smoothing)."""
    y_p = y_pred.flatten()
    y_b = y_true.flatten()
    cc = np.dot(y_p, y_b)   # intersection (masks are 0/1 after thresholding)
    ps = np.sum(y_p)
    pb = np.sum(y_b)
    ds = (2.0*cc + 1.0)/(ps+pb+1.0)
    return ds


def jacard_coef_eval(y_true, y_pred):
    """Jaccard (IoU) coefficient between two flattened binary masks (+1 smoothing)."""
    y_p = y_pred.flatten()
    y_b = y_true.flatten()
    cc = np.dot(y_p, y_b)   # intersection
    ps = np.sum(y_p)
    pb = np.sum(y_b)
    js = (cc + 1.0)/(ps+pb-cc+1.0)
    return js


# + cell: binarize predictions at 0.5
y_test[y_test<0.5] = 0.0
y_test[y_test>=0.5] = 1.0

# + cell: per-sample scores over the test set
dice_score_test = []
jacard_score_test = []
n_s, r, c, ch = y_test.shape
for i in range(n_s):
    dice_score_test.append(dice_coef_eval(y_test[i,:,:,:], test_mask[i,:,:,:]))
    jacard_score_test.append(jacard_coef_eval(y_test[i,:,:,:], test_mask[i,:,:,:]))

print(dice_score_test)
print(jacard_score_test)

# + cell: summary statistics
print(np.max(dice_score_test))
print(np.min(dice_score_test))
print(np.mean(dice_score_test))
print(np.max(jacard_score_test))
print(np.min(jacard_score_test))
# BUGFIX: the original printed np.mean(dice_score_test) a second time here;
# following the max/min pattern above, the intended value is the Jaccard mean.
print(np.mean(jacard_score_test))

import matplotlib.pyplot as plt
plt.plot(dice_score_test)

# + cell: locate the worst-scoring samples
ind = np.unravel_index(np.argmin(dice_score_test, axis=None), len(dice_score_test))
print(ind)
ind2 = np.unravel_index(np.argmin(jacard_score_test, axis=None), len(jacard_score_test))
print(ind2)

import matplotlib.pyplot as plt
a = test_img[69]
plt.imshow(a)

y_test.shape

a = y_test[1,:,:,:].reshape(224,224)
plt.imshow(a)

# + cell: persist arrays for later analysis
np.save('img_test.npy', test_img)
np.save('img_mask.npy', test_mask)
np.save('y_predicted_test.npy', y_test)

# !ls

# Install the PyDrive wrapper & import libraries.
# This only needs to be done once in a notebook.
# !pip install -U -q PyDrive

# + cell: saving to google drive for backup
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
# This only needs to be done once in a notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Create & upload a file.
uploaded = drive.CreateFile({'title': 'img_mask_drop_aug.npy'})
uploaded.SetContentFile('img_mask.npy')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))

# Create & upload a file.
# (reformatted from whitespace-mangled notebook source)
# The five upload cells below were identical except for the two file names, so
# the pattern is factored into a small helper; the printed output is unchanged.


def _upload_to_drive(title, path):
    """Upload *path* to Google Drive under *title* using the `drive` client
    created in the authentication cell above; prints the new file ID."""
    uploaded = drive.CreateFile({'title': title})
    uploaded.SetContentFile(path)
    uploaded.Upload()
    print('Uploaded file with ID {}'.format(uploaded.get('id')))


_upload_to_drive('img_test_drop_aug.npy', 'img_test.npy')
_upload_to_drive('y_predicted_test_drop_aug.npy', 'y_predicted_test.npy')
_upload_to_drive('model.hist_drop_aug.npy', 'model.hist.npy')
_upload_to_drive('unet_224_best_weight_drop_aug.h5', 'unet_224_best_weight.h5')
_upload_to_drive('unet_224_final_model_drop_aug.h5', 'unet_224_final_model.h5')

# Shareable links for the uploaded artifacts:
# img_mask
# https://drive.google.com/open?id=1t7-wJjzUteuGAOnVWeEnCsOIe7aj3U3g
# img_test
# https://drive.google.com/open?id=1XDzdT7ERkHeqrTg8JxGv9b_VVmdZONqM
# model.history
# https://drive.google.com/open?id=18ybbnkQ1yV-Z7TmnMOg12179EpkpraDg
# unet_224_final_model
# https://drive.google.com/open?id=1gETJeGjMOTUSbmwv553lZAGK5RkD_gm6
# unet_224_best_weight
# https://drive.google.com/open?id=1cgE3duyxiyv6NgfT2hWE0yQoMPvCuGdx
U_Net_ISIC_DropAugPrep.ipynb
# --- (Jupytext header, reconstructed from whitespace-mangled source)
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + cell: environment setup -- pin packages, fetch the Autoencoders repo, then
# kill the process so Colab restarts with the pinned versions
# !rm -rf *
# !pip install scipy==1.2.1
# !pip install ipython==7.4.0
# !pip install -U -q zipfile36
# !pip install -U -q PyDrive
# !pip install -U -q hdbscan
# !pip install -U -q dask_ml
# !git clone https://github.com/kkahloots/Autoencoders.git
# !mv ./Autoencoders/* ./
# !mkdir experiments
import os
os.kill(os.getpid(), 9)  # hard-restart the Colab kernel

import warnings
warnings.filterwarnings('ignore')

# # Prepare the Dataset
dataset_name = 'mnist'

# + cell: merge the MNIST train and test splits into a single dataset
import numpy as np
from keras.datasets import mnist
(X, y), (X_test, y_test) = mnist.load_data()
X = np.concatenate((X, X_test))
y = np.concatenate((y, y_test)).flatten()
imgs = X
del X_test
del y_test
print('Dataset size {}'.format(X.shape))

# # AE
# %load_ext autoreload
# %autoreload 2

model_name = 'AE'

from models.AE import AE

ae = AE(dataset_name,
        epochs=int(2e5),
        num_layers=3,
        latent_dim=3,
        hidden_dim=500,
        l_rate=1e-4,
        batch_size=64,
        plot=True,
        clustering=True,
        colab=True,
        colabpath = '1w_Kz94Rcz_OuHUMlahEQMDQxGPrQuL7U')

ae.fit(X,y)

from IPython.display import display, Image

imgs = ae.animate()

# !ls experiments/AE_cifar10_latent_dim3_h_dim500_h_nl3

ae.model.w_space_files

# NOTE(review): the experiment-directory names say "cifar10" although this run
# uses mnist -- presumably left over from a previous run; verify the paths.
animates = ['experiments/AE_cifar10_latent_dim3_h_dim500_h_nl3/AE_cifar10_latent_dim3_h_dim500_h_nl3 W space 3d in epoch_res_animate.gif',
            'experiments/AE_cifar10_latent_dim3_h_dim500_h_nl3/AE_cifar10_latent_dim3_h_dim500_h_nl3 samples generation in epoch_res_animate.gif',
            'experiments/AE_cifar10_latent_dim3_h_dim500_h_nl3/AE_cifar10_latent_dim3_h_dim500_h_nl3 W space in epoch_res_animate.gif']

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
for animate in animates:
    image = mpimg.imread(animate)
    plt.imshow(image)
    plt.show()

import matplotlib.pyplot as plt

plt.imshow(X[0])
plt.axis('off')

# + cell: round-trip one sample through the latent space
z1 = ae.encode(X[0:1])

z1.shape

x1_const = ae.decode(z1)

plt.imshow(x1_const[0])
plt.axis('off')

pred = ae.reconst_loss(X[0:100])

pred.shape

plt.imshow(X[1])
plt.axis('off')

# + cell: latent-space interpolation between two batches of samples
int_imgs = ae.interpolate(X[0:100], X[200:300])

# %load_ext autoreload
# %autoreload 2
from utils.plots import merge
# scipy is pinned to 1.2.1 above; imresize/imsave were removed in scipy>=1.3
from scipy.misc import imresize

scale = 5
im = merge(np.vstack(int_imgs), (len(int_imgs),12))
fig_width = int(im.shape[0] * scale)
fig_height = int(im.shape[1] * scale)
im = imresize(im, (fig_width, fig_height, 3))

plt.figure(figsize = (len(imgs),24), dpi=70)
plt.imshow(im)
plt.axis('off')

import scipy.misc
# BUGFIX: the original concatenated '\\interpolate.jpg' (a Windows separator);
# on the Linux Colab runtime that produces a file literally named
# "<summary_dir>\interpolate.jpg" instead of a file inside summary_dir.
scipy.misc.imsave(ae.model.summary_dir + '/interpolate.jpg', im)
GANs_on_mnist.ipynb
# --- (Jupytext header, reconstructed from whitespace-mangled source)
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# import all required packages
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

netflix_titles = pd.read_csv('netflix_titles.csv', parse_dates=['date_added'])
netflix_titles.head()

# BUGFIX: the original evaluated a bare `recent_shows` here, *before* it is
# defined two statements below -- a NameError on a clean top-to-bottom run
# (stale notebook cell order). The premature reference has been removed.

netflix_titles = netflix_titles[netflix_titles['date_added'].notna()]
recent_shows = pd.DataFrame(netflix_titles[netflix_titles['release_year'] > 2015])
recent_shows

movies = netflix_titles[netflix_titles['type'] == 'Movie']
tv_shows = netflix_titles[netflix_titles['type'] == 'TV Show']

# # Movies vs. TV Shows
# The dataset holds 7787 shows: 5377 movies vs. 2410 TV shows, suggesting
# movies are more popular on Netflix -- but the trends over time show TV shows
# becoming more and more popular.

# BUGFIX: .copy() so the in-place 'duration' cleanup further down does not hit
# pandas' SettingWithCopyWarning (these were slices of slices -- this resolves
# the original "figure out why the copy has flaws" TODO).
recent_movies = movies[movies['release_year'] > 2015].copy()
movie_trends = recent_movies.groupby('release_year')['type'].count()
movie_trends.plot.bar(figsize=(6,4))

recent_tv_shows = tv_shows[tv_shows['release_year'] > 2015].copy()
tv_shows_trends = recent_tv_shows.groupby('release_year')['type'].count()
#movie_trends.head()
tv_shows_trends.plot.bar(figsize=(6,4))

# The 2 barplots above show movie popularity declining over the last 20 years
# while TV shows gain ground. Conclusion: Zuckflix should focus on TV shows.
#
# # All Shows Ratings
# Overall TV-MA, TV-14, TV-PG, R and TV-Y are most popular (R and PG-13 are
# rare or non-existent for TV shows).
#
# # Recent Show Ratings (since 2016: 4910 shows = 3125 movies + 1785 TV shows)
#   top 5 overall : TV-MA, TV-14, TV-PG, R, TV-Y
#   top 5 movies  : TV-MA, TV-14, TV-PG, R, PG-13
#   top 5 tv shows: TV-MA, TV-14, TV-PG, TV-Y, TV-Y7
# Conclusion: focus on the top 3 ratings across categories: TV-MA, TV-14, TV-PG.
#
# # Ratings over time
# Movies : decreasing = TV-MA, TV-14, TV-PG; mixed = R, PG-13; increasing = TV-Y
# TV     : increasing = TV-MA, TV-G; mixed = TV-14, TV-PG, TV-Y, TV-Y7
# Overall: mixed = TV-MA, TV-14, R, TV-PG, PG-13, PG; increasing = TV-Y;
#          mostly increasing = TV-Y7, TV-G
# Conclusion: focus on TV-Y/TV-Y7 (mostly TV shows) and TV-MA/TV-G (mostly
# movies); PG-13 and TV-PG have mixed trends but could potentially be revived.

# recent tv shows ratings (similar process for movies and overall)
tv_shows_rating = pd.DataFrame(tv_shows['rating'].value_counts()).reset_index()
ax2 = tv_shows_rating.head(5).plot.bar(x='index', y='rating', rot=0, figsize=(6,4))

# ratings trends over time (similar process for movies and tv shows)
ratings_overall = recent_shows[recent_shows['rating'] == 'TV-Y']
overall_ratings_trends = ratings_overall.groupby('release_year')['rating'].count()
overall_ratings_trends.plot.bar(figsize=(6,4))

# # TV Shows Duration
# A significant majority of TV shows have only 1 season; 2- and 3-season shows
# complete the top 3.
# # Movies Duration
# Most movies last 85-112 min (roughly 1h25 to 1h45).
# # Recommendations
# TV shows: fund 1-3 seasons (ideally 1), extending only on demonstrated
# popularity. Movies: aim for about 1h30.

# strip " Season"/" Seasons" down to the bare number of seasons
recent_tv_shows.loc[:, 'duration'] = recent_tv_shows['duration'].str.replace(' Season', '').str.replace('s', '')
recent_tv_shows.head(1)

tv_shows_duration = pd.DataFrame(recent_tv_shows['duration'].value_counts()).reset_index()
tv_shows_duration.rename(columns={"index": "duration (in seasons)", "duration": "num of tv shows"}, inplace=True)
# tv_shows_duration.head()
ax = tv_shows_duration.head().plot.bar(x='duration (in seasons)', y='num of tv shows', rot=0, figsize=(6,4))

# movie durations: strip " min", cast to int, drop outliers >= 200 min
recent_movies.loc[:, 'duration'] = recent_movies['duration'].str.replace(' min', '')
int_movies = recent_movies.copy()
int_movies['duration'] = int_movies['duration'].astype(int)
sorted_movies = int_movies.sort_values(by=['duration'])
sorted_movies = sorted_movies[sorted_movies['duration'] < 200]
sns.set(rc={'figure.figsize':(10,5)})
sns.histplot(data=sorted_movies, x='duration')

# # Country Recent Trends
# Counting every country a show was (co-)produced in over the last 5 years,
# the US comes first (~2000 shows, 5x the 2nd country, India). Recommendation:
# focus on the top 3 (US, India, UK) first; later Canada and France.

# + all countries (full dataset)
all_countries = netflix_titles.copy()
# split the comma-separated country strings into lists
all_countries['country'] = all_countries['country'].str.split(', ')
# explode the lists: one row per (show, country) pair
all_countries = all_countries.explode('country').reset_index(drop=True)
# dataframe for shows with all countries
sep_country_df = pd.DataFrame(all_countries['country'].value_counts()).reset_index()
sep_country_df.head()

# + all recent countries
all_recent_countries = recent_shows.copy()
all_recent_countries['country'] = all_recent_countries['country'].str.split(', ')
all_recent_countries = all_recent_countries.explode('country').reset_index(drop=True)
rec_sep_country_df = pd.DataFrame(all_recent_countries['country'].value_counts()).reset_index()
rec_sep_country_df.head()

# # Exploring if the countries trends vary for TV shows or movies
# # Movies
all_recent_countries_movies = recent_movies.copy()
all_recent_countries_movies['country'] = all_recent_countries_movies['country'].str.split(', ')
all_recent_countries_movies = all_recent_countries_movies.explode('country').reset_index(drop=True)
rec_sep_country_movies_df = pd.DataFrame(all_recent_countries_movies['country'].value_counts()).reset_index()
rec_sep_country_movies_df.head(10)

# # TV Shows
all_recent_countries_tv_shows = recent_tv_shows.copy()
all_recent_countries_tv_shows['country'] = all_recent_countries_tv_shows['country'].str.split(', ')
all_recent_countries_tv_shows = all_recent_countries_tv_shows.explode('country').reset_index(drop=True)
rec_sep_country_tv_shows_df = pd.DataFrame(all_recent_countries_tv_shows['country'].value_counts()).reset_index()
rec_sep_country_tv_shows_df.head(10)

# # Country and Movies vs. TV Shows
# Movies follow the overall top-5 countries, but TV shows are dominated by the
# US, UK, South Korea and Japan -- movies and TV shows therefore need different
# country strategies.

# # Exploring the most popular genres
# Movies : international movies, dramas, comedies, documentaries, independent.
# TV     : international TV shows, TV dramas, TV comedies, crime TV, kids TV.
# International, drama and comedy lead across both; start there and expand
# into other genres after 1-2 successful years.

# + movies genres
all_movies_genres = recent_movies.copy()
all_movies_genres['listed_in'] = all_movies_genres['listed_in'].str.split(', ')
all_movies_genres = all_movies_genres.explode('listed_in').reset_index(drop=True)
movies_genres_df = pd.DataFrame(all_movies_genres['listed_in'].value_counts()).reset_index()
movies_genres_df.head()

# + tv shows genres
all_tv_shows_genres = recent_tv_shows.copy()
all_tv_shows_genres['listed_in'] = all_tv_shows_genres['listed_in'].str.split(', ')
all_tv_shows_genres = all_tv_shows_genres.explode('listed_in').reset_index(drop=True)
tv_shows_genres_df = pd.DataFrame(all_tv_shows_genres['listed_in'].value_counts()).reset_index()
tv_shows_genres_df.head()

# # Analysis of trends in a specific country: France
# 115 shows produced exclusively in France vs. 349 produced fully or partly
# there; over the last 5 years exclusively-French production has declined to
# about a third of all French-involved content.

# france content (fully or partially)
france_content = netflix_titles[netflix_titles['country'] == 'France']
all_france_content = all_countries[all_countries['country'] == 'France']
rec_france_content = recent_shows[recent_shows['country'] == 'France']
all_rec_france_content = all_recent_countries[all_recent_countries['country'] == 'France']
all_rec_france_content

# all_rec_france_content_trends = all_rec_france_content.groupby('release_year').count()
# all_rec_france_content_trends.plot.bar(figsize=(6,4))

# # France Movies vs. TV Shows
# 146 movies vs. 59 TV shows; over the last 5 years movie production declined
# significantly while TV show offerings more or less increased.
# Note: no 2021 release yet when this dataset was published on Kaggle.

all_rec_france_movies = all_rec_france_content[all_rec_france_content['type'] == 'Movie'].copy()
all_rec_france_movies.count()[0]

all_rec_france_movies = all_rec_france_content[all_rec_france_content['type'] == 'Movie'].copy()
all_rec_france_movies_trends = all_rec_france_movies.groupby('release_year')['type'].count()
all_rec_france_movies_trends.plot.bar(figsize=(6,4))

all_rec_france_tv_shows = all_rec_france_content[all_rec_france_content['type'] == 'TV Show'].copy()
all_rec_france_tv_shows_trends = all_rec_france_tv_shows.groupby('release_year')['type'].count()
all_rec_france_tv_shows_trends.plot.bar(figsize=(6,4))

# # France Content Ratings
# TV-MA dominates both movies (then TV-14, R) and TV shows. Despite a 2020 dip
# (likely the Covid-19 production slowdown), focus on TV-MA content.

france_movies_rating = pd.DataFrame(all_rec_france_movies['rating'].value_counts()).reset_index()
frax1 = france_movies_rating.plot.bar(x='index', y='rating', rot=0, figsize=(6,4))

france_tv_shows_rating = pd.DataFrame(all_rec_france_tv_shows['rating'].value_counts()).reset_index()
frax2 = france_tv_shows_rating.plot.bar(x='index', y='rating', rot=0, figsize=(6,4))

france_ratings_overall = all_rec_france_content[all_rec_france_content['rating'] == 'TV-MA']
france_overall_ratings_trends = france_ratings_overall.groupby('release_year')['rating'].count()
france_overall_ratings_trends.plot.bar(figsize=(6,4))

# # Duration Trends for France
# Same as global: TV shows mostly 1 season (then 2-3); movies mostly
# 80-100 min. Recommendation: produce content of that length.

all_rec_france_tv_shows.loc[:, 'duration'] = all_rec_france_tv_shows['duration'].str.replace(' Season', '').str.replace('s', '')
all_rec_france_tv_shows.head(1)

all_rec_france_tv_shows_duration = pd.DataFrame(all_rec_france_tv_shows['duration'].value_counts()).reset_index()
all_rec_france_tv_shows_duration.rename(columns={"index": "duration (in seasons)", "duration": "num of tv shows"}, inplace=True)
# tv_shows_duration.head()
frax3 = all_rec_france_tv_shows_duration.head().plot.bar(x='duration (in seasons)', y='num of tv shows', rot=0, figsize=(6,4))

all_rec_france_movies.loc[:, 'duration'] = all_rec_france_movies['duration'].str.replace(' min', '')
int_france_movies = all_rec_france_movies.copy()
int_france_movies['duration'] = int_france_movies['duration'].astype(int)
sorted_france_movies = int_france_movies.sort_values(by=['duration'])
# sorted_france_movies = sorted_france_movies[sorted_movies['duration'] < 200]
sns.set(rc={'figure.figsize':(10,5)})
sns.histplot(data=sorted_france_movies, x='duration')

# # Genre Trends for France
# # Movies
# International movies and dramas lead -- focus there in the first few years.

all_france_movies_genres = all_rec_france_movies.copy()
all_france_movies_genres['listed_in'] = all_france_movies_genres['listed_in'].str.split(', ')
all_france_movies_genres = all_france_movies_genres.explode('listed_in').reset_index(drop=True)
france_movies_genres_df = pd.DataFrame(all_france_movies_genres['listed_in'].value_counts()).reset_index()
france_movies_genres_df.head()

# # TV Shows
# International TV shows, TV dramas and kids TV are most popular; probably
# focus on international TV shows.

all_france_tv_shows_genres = all_rec_france_tv_shows.copy()
all_france_tv_shows_genres['listed_in'] = all_france_tv_shows_genres['listed_in'].str.split(', ')
all_france_tv_shows_genres = all_france_tv_shows_genres.explode('listed_in').reset_index(drop=True)
france_tv_shows_genres_df = pd.DataFrame(all_france_tv_shows_genres['listed_in'].value_counts()).reset_index()
france_tv_shows_genres_df.head()

# # Remaining Analysis
# Less priority: director, cast, date_added vs. release_year, description
# (probably not).
# To do if time allows: check which countries co-produce content that is
# partially produced in France.
netflix_titles
release_year_version.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Decision Tree

# ## Data Cleaning

# ### ADULT Dataset

# +
import numpy as np
import pandas as pd
from sklearn import svm, datasets

adult = pd.read_csv('adult.data')
adult.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education num',
                 'marital status', 'occupation', 'relationship', 'race', 'sex',
                 'capital gain', 'capital loss', 'hours per week', 'country',
                 'income']

# The raw file marks unknown values as ' ?' (every value carries a leading
# space); convert them to NaN so they can be dropped.
adult['workclass'] = adult['workclass'].replace(' ?', np.nan)
adult['occupation'] = adult['occupation'].replace(' ?', np.nan)
adult['country'] = adult['country'].replace(' ?', np.nan)

# Drop every row that still contains a missing value.
adult.dropna(how='any', inplace=True)

# fnlwgt is a census sampling weight, not a predictive feature.
adult.drop(['fnlwgt'], axis=1, inplace=True)

# Integer-encode the categorical columns (label encoding — the original
# comments called this "one-hot", which it is not).
adult['workclass'] = adult['workclass'].map({
    ' Self-emp-not-inc': 0, ' Self-emp-inc': 1, ' Local-gov': 2,
    ' State-gov': 3, ' Federal-gov': 4, ' Private': 5, ' Without-pay': 6})
adult['education'] = adult['education'].map({
    ' Preschool': 0, ' 1st-4th': 1, ' 5th-6th': 2, ' 7th-8th': 3, ' 9th': 4,
    ' 10th': 5, ' 11th': 6, ' 12th': 7, ' HS-grad': 8, ' Some-college': 9,
    ' Assoc-acdm': 10, ' Assoc-voc': 11, ' Bachelors': 12, ' Masters': 13,
    ' Doctorate': 14, ' Prof-school': 15})
adult['marital status'] = adult['marital status'].map({
    ' Never-married': 0, ' Married-civ-spouse': 1, ' Married-AF-spouse': 2,
    ' Married-spouse-absent': 3, ' Divorced': 4, ' Separated': 5,
    ' Widowed': 6})
adult['occupation'] = adult['occupation'].map({
    ' Exec-managerial': 0, ' Handlers-cleaners': 1, ' Prof-specialty': 2,
    ' Other-service': 3, ' Adm-clerical': 4, ' Sales': 5,
    ' Transport-moving': 6, ' Farming-fishing': 7, ' Machine-op-inspct': 8,
    ' Tech-support': 9, ' Craft-repair': 10, ' Protective-serv': 11,
    ' Armed-Forces': 12, ' Priv-house-serv': 13})
adult['relationship'] = adult['relationship'].map({
    ' Husband': 0, ' Not-in-family': 1, ' Wife': 2, ' Own-child': 3,
    ' Unmarried': 4, ' Other-relative': 5})
adult['race'] = adult['race'].map({
    ' White': 0, ' Black': 1, ' Asian-Pac-Islander': 2,
    ' Amer-Indian-Eskimo': 3, ' Other': 4})
# Binarize country: 1 = United States, 0 = everywhere else.
adult['country'] = np.where(adult['country'] != ' United-States', 0, 1)
adult['sex'] = adult['sex'].map({' Male': 0, ' Female': 1})
adult['income'] = adult['income'].map({' <=50K': 0, ' >50K': 1})
adult.head()
# -

# ### LETTER Dataset

# +
letter = pd.read_csv('letter-recognition.data')
letter.columns = ['lettr', 'x-box', 'y-box', 'width', 'height', 'pixels',
                  'x-bar', 'y-bar', 'x2bar', 'y2bar', 'xybar', 'x2ybr',
                  'xy2br', 'x ege', 'xegvy', 'y-ege', 'yegvx']

# Binarize the target: letters A-M -> 0, N-Z -> 1.
letter['lettr'] = letter['lettr'].map({
    'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0,
    'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 1, 'O': 1, 'P': 1, 'Q': 1, 'R': 1,
    'S': 1, 'T': 1, 'U': 1, 'V': 1, 'W': 1, 'X': 1, 'Y': 1, 'Z': 1})
letter.head()
# -

# ### CHESS Dataset

# +
chess = pd.read_csv('krkopt.data')
chess.columns = ['White King file', 'White King rank', 'White Rook file',
                 'White Rook rank', 'Black King file', 'Black King rank',
                 'Optimal Depth of Win']

# Encode the board files as integers (the white king is restricted to
# files a-d in this dataset, hence the shorter map).
chess['White King file'] = chess['White King file'].map(
    {'a': 0, 'b': 1, 'c': 2, 'd': 3})
chess['White Rook file'] = chess['White Rook file'].map(
    {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7})
chess['Black King file'] = chess['Black King file'].map(
    {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7})
# Binarize the target: win within eight moves -> 1, otherwise (incl. draw) -> 0.
chess['Optimal Depth of Win'] = chess['Optimal Depth of Win'].map({
    'zero': 1, 'one': 1, 'two': 1, 'three': 1, 'four': 1, 'five': 1,
    'six': 1, 'seven': 1, 'eight': 1, 'nine': 0, 'ten': 0, 'eleven': 0,
    'twelve': 0, 'thirteen': 0, 'fourteen': 0, 'fifteen': 0, 'sixteen': 0,
    'draw': 0})
chess.head()
# -

# ### OCCUPANCY Dataset

# +
occupancy = pd.read_csv('occupancy_data.txt')
# The raw timestamp column is not a usable feature here.
occupancy = occupancy.drop('date', axis=1)
occupancy.head()
# -

# ## Data Analysis
#
# ### Training DT

# +
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score

NUM_TRIALS = 5

# adult data & target
X_a = adult.drop('income', axis=1)
Y_a = adult['income']

# letter data & target
X_l = letter.drop('lettr', axis=1)
Y_l = letter['lettr']

# chess data & target
X_c = chess.drop('Optimal Depth of Win', axis=1)
Y_c = chess['Optimal Depth of Win']

# occupancy data & target
X_o = occupancy.drop('Occupancy', axis=1)
Y_o = occupancy['Occupancy']
# +
# %%time
pipe = Pipeline([('std', StandardScaler()), ('classifier', DecisionTreeClassifier())])

X_a_train, X_a_test, Y_a_train, Y_a_test = train_test_split(X_a, Y_a, train_size=5000)
X_l_train, X_l_test, Y_l_train, Y_l_test = train_test_split(X_l, Y_l, train_size=5000)
X_c_train, X_c_test, Y_c_train, Y_c_test = train_test_split(X_c, Y_c, train_size=5000)
X_o_train, X_o_test, Y_o_train, Y_o_test = train_test_split(X_o, Y_o, train_size=5000)

# Candidate hyperparameter grids for the decision tree.
search_space = [{'classifier': [DecisionTreeClassifier()],
                 'classifier__criterion': ['entropy'],
                 'classifier__max_depth': [1, 2, 3, 4],
                 'classifier__min_samples_split': [2, 3, 4, 5]},
                {'classifier': [DecisionTreeClassifier()],
                 'classifier__criterion': ['gini'],
                 'classifier__max_depth': [1, 2, 3, 4],
                 'classifier__min_samples_split': [2, 3, 4, 5]},
                {'classifier': [DecisionTreeClassifier()],
                 'classifier__criterion': ['gini', 'entropy'],
                 'classifier__min_samples_split': [2, 3]},
                ]


def _make_search():
    """Return a fresh, unfitted grid search over ``search_space``.

    BUG FIX: ``GridSearchCV.fit`` returns ``self``, so the original
    ``model_adult = model.fit(...)``, ``model_letter = model.fit(...)``
    etc. made all four names alias ONE object — after the last call,
    every ``model_*.cv_results_`` reflected only the OCCUPANCY fit.
    Building a new search per dataset keeps each result set independent.
    """
    return GridSearchCV(pipe, search_space, cv=StratifiedKFold(n_splits=5),
                        scoring=['accuracy', 'roc_auc_ovr', 'f1_micro'],
                        refit=False, verbose=0)


# Fit one independent grid search per dataset
model_adult = _make_search().fit(X_a_train, Y_a_train)
model_letter = _make_search().fit(X_l_train, Y_l_train)
model_chess = _make_search().fit(X_c_train, Y_c_train)
model_occupancy = _make_search().fit(X_o_train, Y_o_train)
# below we may get warnings for some hyperparameter settings
# -

# ## Raw Tables


def avg(array):
    """Return the arithmetic mean of *array*."""
    return sum(array) / len(array)


def split_means(model, metric):
    """Mean test score of *metric* over all candidate parameter
    settings, one value per CV split (5 splits)."""
    return [avg(model.cv_results_['split{}_test_{}'.format(i, metric)])
            for i in range(5)]


# ### Accuracy

dt_acc = (split_means(model_adult, 'accuracy')
          + split_means(model_letter, 'accuracy')
          + split_means(model_chess, 'accuracy')
          + split_means(model_occupancy, 'accuracy'))
dt_acc

# ### ROC AUC

dt_roc = (split_means(model_adult, 'roc_auc_ovr')
          + split_means(model_letter, 'roc_auc_ovr')
          + split_means(model_chess, 'roc_auc_ovr')
          + split_means(model_occupancy, 'roc_auc_ovr'))
dt_roc

# ### F1

dt_f1 = (split_means(model_adult, 'f1_micro')
         + split_means(model_letter, 'f1_micro')
         + split_means(model_chess, 'f1_micro')
         + split_means(model_occupancy, 'f1_micro'))
dt_f1

# ### Analysis on ADULT

# +
# classification report for the best-accuracy parameters
# (rank 1 = best, so argmin of the rank array picks the top candidate)
param_adult_accuracy = model_adult.cv_results_['params'][
    np.argmin(model_adult.cv_results_['rank_test_accuracy'])]
pipe.set_params(**param_adult_accuracy)
pipe.fit(X_a_train, Y_a_train)
Y_a_train_pred_accuracy = pipe.predict(X_a_train)
print("Training Accuracy Set: \n", classification_report(Y_a_train, Y_a_train_pred_accuracy))

# BUG FIX: score the held-out test set with the train-fitted model;
# the original refit the pipeline on the test data before predicting it.
Y_a_test_pred_accuracy = pipe.predict(X_a_test)
print("Testing Accuracy Set: \n", classification_report(Y_a_test, Y_a_test_pred_accuracy))
# -

# +
# classification report for the best-roc_auc_ovr parameters
param_adult_roc_auc_ovr = model_adult.cv_results_['params'][
    np.argmin(model_adult.cv_results_['rank_test_roc_auc_ovr'])]
pipe.set_params(**param_adult_roc_auc_ovr)
pipe.fit(X_a_train, Y_a_train)
Y_a_train_pred_roc_auc_ovr = pipe.predict(X_a_train)
print("Training ROC AUC Set: \n", classification_report(Y_a_train, Y_a_train_pred_roc_auc_ovr))

Y_a_test_pred_roc_auc_ovr = pipe.predict(X_a_test)
print("Testing ROC AUC Set: \n", classification_report(Y_a_test, Y_a_test_pred_roc_auc_ovr))
# -

# +
# classification report for the best-f1_micro parameters
param_adult_f1_micro = model_adult.cv_results_['params'][
    np.argmin(model_adult.cv_results_['rank_test_f1_micro'])]
pipe.set_params(**param_adult_f1_micro)
pipe.fit(X_a_train, Y_a_train)
Y_a_train_pred_f1_micro = pipe.predict(X_a_train)
print("Training F1 Micro Set\n", classification_report(Y_a_train, Y_a_train_pred_f1_micro))

Y_a_test_pred_f1_micro = pipe.predict(X_a_test)
print("Testing F1 Micro Set\n", classification_report(Y_a_test, Y_a_test_pred_f1_micro))
# -

# ### Analysis on LETTER

# +
# classification report for the best-accuracy parameters
param_letter_accuracy = model_letter.cv_results_['params'][
    np.argmin(model_letter.cv_results_['rank_test_accuracy'])]
pipe.set_params(**param_letter_accuracy)
pipe.fit(X_l_train, Y_l_train)
Y_l_train_pred_accuracy = pipe.predict(X_l_train)
print("Training Accuracy Set: \n", classification_report(Y_l_train, Y_l_train_pred_accuracy))

# BUG FIX: the original reapplied the ADULT parameters here
# (set_params(**param_adult_accuracy)) and refit the pipeline on the
# LETTER *test* data before scoring it. Instead, score the train-fitted
# LETTER model on its held-out test split.
Y_l_test_pred_accuracy = pipe.predict(X_l_test)
print("Testing Accuracy Set: \n", classification_report(Y_l_test, Y_l_test_pred_accuracy))
# -

# +
# classification report for the best-roc_auc_ovr parameters
param_letter_roc_auc_ovr = model_letter.cv_results_['params'][
    np.argmin(model_letter.cv_results_['rank_test_roc_auc_ovr'])]
pipe.set_params(**param_letter_roc_auc_ovr)
pipe.fit(X_l_train, Y_l_train)
Y_l_train_pred_roc_auc_ovr = pipe.predict(X_l_train)
print("Training ROC AUC Set: \n", classification_report(Y_l_train, Y_l_train_pred_roc_auc_ovr))

Y_l_test_pred_roc_auc_ovr = pipe.predict(X_l_test)
print("Testing ROC AUC Set: \n", classification_report(Y_l_test, Y_l_test_pred_roc_auc_ovr))
# -

# +
# classification report for the best-f1_micro parameters
param_letter_f1_micro = model_letter.cv_results_['params'][
    np.argmin(model_letter.cv_results_['rank_test_f1_micro'])]
pipe.set_params(**param_letter_f1_micro)
pipe.fit(X_l_train, Y_l_train)
Y_l_train_pred_f1_micro = pipe.predict(X_l_train)
print("Training F1 Micro Set\n", classification_report(Y_l_train, Y_l_train_pred_f1_micro))

Y_l_test_pred_f1_micro = pipe.predict(X_l_test)
print("Testing F1 Micro Set\n", classification_report(Y_l_test, Y_l_test_pred_f1_micro))
# -

# ### Analysis on CHESS

# +
# classification report for the best-accuracy parameters
param_chess_accuracy = model_chess.cv_results_['params'][
    np.argmin(model_chess.cv_results_['rank_test_accuracy'])]
pipe.set_params(**param_chess_accuracy)
pipe.fit(X_c_train, Y_c_train)
Y_c_train_pred_accuracy = pipe.predict(X_c_train)
print("Training Accuracy Set: \n", classification_report(Y_c_train, Y_c_train_pred_accuracy))

# BUG FIX (as above): no ADULT params, no refit on the test data.
Y_c_test_pred_accuracy = pipe.predict(X_c_test)
print("Testing Accuracy Set: \n", classification_report(Y_c_test, Y_c_test_pred_accuracy))
# -

# +
# classification report for the best-roc_auc_ovr parameters
param_chess_roc_auc_ovr = model_chess.cv_results_['params'][
    np.argmin(model_chess.cv_results_['rank_test_roc_auc_ovr'])]
pipe.set_params(**param_chess_roc_auc_ovr)
pipe.fit(X_c_train, Y_c_train)
Y_c_train_pred_roc_auc_ovr = pipe.predict(X_c_train)
print("Training ROC AUC Set: \n", classification_report(Y_c_train, Y_c_train_pred_roc_auc_ovr))

Y_c_test_pred_roc_auc_ovr = pipe.predict(X_c_test)
print("Testing ROC AUC Set: \n", classification_report(Y_c_test, Y_c_test_pred_roc_auc_ovr))
# -

# +
# classification report for the best-f1_micro parameters
param_chess_f1_micro = model_chess.cv_results_['params'][
    np.argmin(model_chess.cv_results_['rank_test_f1_micro'])]
pipe.set_params(**param_chess_f1_micro)
pipe.fit(X_c_train, Y_c_train)
Y_c_train_pred_f1_micro = pipe.predict(X_c_train)
print("Training F1 Micro Set\n", classification_report(Y_c_train, Y_c_train_pred_f1_micro))

Y_c_test_pred_f1_micro = pipe.predict(X_c_test)
print("Testing F1 Micro Set\n", classification_report(Y_c_test, Y_c_test_pred_f1_micro))
# -

# ### Analysis on OCCUPANCY

# +
# classification report for the best-accuracy parameters
param_occupancy_accuracy = model_occupancy.cv_results_['params'][
    np.argmin(model_occupancy.cv_results_['rank_test_accuracy'])]
pipe.set_params(**param_occupancy_accuracy)
pipe.fit(X_o_train, Y_o_train)
Y_o_train_pred_accuracy = pipe.predict(X_o_train)
print("Training Accuracy Set: \n", classification_report(Y_o_train, Y_o_train_pred_accuracy))

Y_o_test_pred_accuracy = pipe.predict(X_o_test)
print("Testing Accuracy Set: \n", classification_report(Y_o_test, Y_o_test_pred_accuracy))
# -

# +
# classification report for the best-roc_auc_ovr parameters
param_occupancy_roc_auc_ovr = model_occupancy.cv_results_['params'][
    np.argmin(model_occupancy.cv_results_['rank_test_roc_auc_ovr'])]
pipe.set_params(**param_occupancy_roc_auc_ovr)
pipe.fit(X_o_train, Y_o_train)
Y_o_train_pred_roc_auc_ovr = pipe.predict(X_o_train)
print("Training ROC AUC Set: \n", classification_report(Y_o_train, Y_o_train_pred_roc_auc_ovr))

Y_o_test_pred_roc_auc_ovr = pipe.predict(X_o_test)
print("Testing ROC AUC Set: \n", classification_report(Y_o_test, Y_o_test_pred_roc_auc_ovr))
# -

# +
# classification report for the best-f1_micro parameters
param_occupancy_f1_micro = model_occupancy.cv_results_['params'][
    np.argmin(model_occupancy.cv_results_['rank_test_f1_micro'])]
pipe.set_params(**param_occupancy_f1_micro)
pipe.fit(X_o_train, Y_o_train)
Y_o_train_pred_f1_micro = pipe.predict(X_o_train)
print("Training F1 Micro Set\n", classification_report(Y_o_train, Y_o_train_pred_f1_micro))

Y_o_test_pred_f1_micro = pipe.predict(X_o_test)
print("Testing F1 Micro Set\n", classification_report(Y_o_test, Y_o_test_pred_f1_micro))
# -

# ## Scores

# ### Testing Scores

# BUG FIX: sklearn metrics take (y_true, y_pred); the original passed
# the arguments reversed, which changes ROC AUC and F1 (accuracy alone
# is symmetric in its arguments).
graph_test = {'Accuracy': [accuracy_score(Y_a_test, Y_a_test_pred_accuracy),
                           accuracy_score(Y_l_test, Y_l_test_pred_accuracy),
                           accuracy_score(Y_c_test, Y_c_test_pred_accuracy),
                           accuracy_score(Y_o_test, Y_o_test_pred_accuracy)],
              'ROC AUC': [roc_auc_score(Y_a_test, Y_a_test_pred_roc_auc_ovr),
                          roc_auc_score(Y_l_test, Y_l_test_pred_roc_auc_ovr),
                          roc_auc_score(Y_c_test, Y_c_test_pred_roc_auc_ovr),
                          roc_auc_score(Y_o_test, Y_o_test_pred_roc_auc_ovr)],
              'F1 score': [f1_score(Y_a_test, Y_a_test_pred_f1_micro),
                           f1_score(Y_l_test, Y_l_test_pred_f1_micro),
                           f1_score(Y_c_test, Y_c_test_pred_f1_micro),
                           f1_score(Y_o_test, Y_o_test_pred_f1_micro)]}
test_score_df = pd.DataFrame(graph_test,
                             columns=['Accuracy', 'ROC AUC', 'F1 score'],
                             index=['ADULT', 'LETTER', 'CHESS', 'OCCUPANCY'])
print(test_score_df)

# ### Training Scores

graph_train = {'Accuracy': [accuracy_score(Y_a_train, Y_a_train_pred_accuracy),
                            accuracy_score(Y_l_train, Y_l_train_pred_accuracy),
                            accuracy_score(Y_c_train, Y_c_train_pred_accuracy),
                            accuracy_score(Y_o_train, Y_o_train_pred_accuracy)],
               'ROC AUC': [roc_auc_score(Y_a_train, Y_a_train_pred_roc_auc_ovr),
                           roc_auc_score(Y_l_train, Y_l_train_pred_roc_auc_ovr),
                           roc_auc_score(Y_c_train, Y_c_train_pred_roc_auc_ovr),
                           roc_auc_score(Y_o_train, Y_o_train_pred_roc_auc_ovr)],
               'F1 score': [f1_score(Y_a_train, Y_a_train_pred_f1_micro),
                            f1_score(Y_l_train, Y_l_train_pred_f1_micro),
                            f1_score(Y_c_train, Y_c_train_pred_f1_micro),
                            f1_score(Y_o_train, Y_o_train_pred_f1_micro)]}
train_score_df = pd.DataFrame(graph_train,
                              columns=['Accuracy', 'ROC AUC', 'F1 score'],
                              index=['ADULT', 'LETTER', 'CHESS', 'OCCUPANCY'])
print(train_score_df)

# ## Heatmaps

# ### ADULT Dataset

# +
import seaborn as sns

results_adult = pd.DataFrame(model_adult.cv_results_['params'])
# score is accuracy; display it as misclassification error, 1 - accuracy
results_adult['score_acc'] = 1 - model_adult.cv_results_['mean_test_accuracy']
# strip the 'classifier__' prefix from the column names
cols_adult = results_adult.columns.to_series().str.split('__').apply(lambda x: x[-1])
results_adult.columns = cols_adult
# error for the gini criterion across max_depth / min_samples_split
# (keyword arguments: DataFrame.pivot dropped positional args in pandas 2.0)
ax_adult = sns.heatmap(
    results_adult.query('criterion=="gini"').pivot(index='max_depth',
                                                   columns='min_samples_split',
                                                   values='score_acc'),
    annot=True, fmt='.3f')
# -

# ### LETTER Dataset

results_letter = pd.DataFrame(model_letter.cv_results_['params'])
results_letter['score_acc'] = 1 - model_letter.cv_results_['mean_test_accuracy']
cols_letter = results_letter.columns.to_series().str.split('__').apply(lambda x: x[-1])
results_letter.columns = cols_letter
ax_letter = sns.heatmap(
    results_letter.query('criterion=="gini"').pivot(index='max_depth',
                                                    columns='min_samples_split',
                                                    values='score_acc'),
    annot=True, fmt='.3f')

# ### CHESS Dataset

results_chess = pd.DataFrame(model_chess.cv_results_['params'])
results_chess['score_acc'] = 1 - model_chess.cv_results_['mean_test_accuracy']
cols_chess = results_chess.columns.to_series().str.split('__').apply(lambda x: x[-1])
results_chess.columns = cols_chess
ax_chess = sns.heatmap(
    results_chess.query('criterion=="gini"').pivot(index='max_depth',
                                                   columns='min_samples_split',
                                                   values='score_acc'),
    annot=True, fmt='.3f')

# ### OCCUPANCY Dataset

results_occupancy = pd.DataFrame(model_occupancy.cv_results_['params'])
results_occupancy['score_acc'] = 1 - model_occupancy.cv_results_['mean_test_accuracy']
cols_occupancy = results_occupancy.columns.to_series().str.split('__').apply(lambda x: x[-1])
results_occupancy.columns = cols_occupancy
ax_occupancy = sns.heatmap(
    results_occupancy.query('criterion=="gini"').pivot(index='max_depth',
                                                       columns='min_samples_split',
                                                       values='score_acc'),
    annot=True, fmt='.3f')
python_scripts/Decision Tree Set Up.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img align="left" src="https://ithaka-labs.s3.amazonaws.com/static-files/images/tdm/tdmdocs/CC_BY.png"><br /> # # Created by [<NAME>](http://nkelber.com) and Ted Lawless for [JSTOR Labs](https://labs.jstor.org/) under [Creative Commons CC BY License](https://creativecommons.org/licenses/by/4.0/)<br /> # For questions/comments/improvements, email <EMAIL>.<br /> # ___ # # Python Basics 1 # # **Description:** This lesson describes [operators](https://docs.constellate.org/key-terms/#operator), [expressions](https://docs.constellate.org/key-terms/#expression), data types, [variables](https://docs.constellate.org/key-terms/#variable), and basic [functions](https://docs.constellate.org/key-terms/#function). Complete this lesson if you are familiar with [Jupyter notebooks](https://docs.constellate.org/key-terms/#jupyter-notebook) or have completed *Getting Started with Jupyter Notebooks*, but do not have any experience with [Python](https://docs.constellate.org/key-terms/#python) programming. This is part 1 of 3 in the series *Python Basics* that will prepare you to do text analysis using the [Python](https://docs.constellate.org/key-terms/#python) programming language. 
# # **Use Case:** For Learners (Detailed explanation, not ideal for researchers) # # **Difficulty:** Beginner # # **Completion Time:** 75 minutes # # **Knowledge Required:** # * [Getting Started with Jupyter Notebooks](./getting-started-with-jupyter.ipynb) # # **Knowledge Recommended:** None # # **Data Format:** None # # **Libraries Used:** None # # **Research Pipeline:** None # ___ # [![Getting Started with Jupyter Notebooks](https://ithaka-labs.s3.amazonaws.com/static-files/images/tdm/tdmdocs/video/python-basics.png)](https://www.youtube.com/watch?v=90wFLSjlFL8) # ## Introduction # # [Python](https://docs.constellate.org/key-terms/#python) is the fastest-growing language in computer programming. Learning [Python](https://docs.constellate.org/key-terms/#python) is a great choice because [Python](https://docs.constellate.org/key-terms/#python) is: # # * Widely-adopted in the digital humanities and data science # * Regarded as an easy-to-learn language # * Flexible, having wide support for working with numerical and textual data # * A skill desired by employers in academic, non-profit, and private sectors # # The second most-popular language for digital humanities and data science work is [R](https://docs.constellate.org/key-terms/#r). We plan to create additional support for learning [R](https://docs.constellate.org/key-terms/#r) soon. If you are interested in helping develop open educational resources for [R](https://docs.constellate.org/key-terms/#r), please reach out to <NAME> (<EMAIL>). # # The skills you'll learn in *Python Basics* 1-3 are general-purpose [Python](https://docs.constellate.org/key-terms/#python) skills, applicable for any of the text analysis notebooks that you may explore later. They are also widely applicable to many other kinds of tasks in [Python](https://docs.constellate.org/key-terms/#python) beyond text analysis. # # **Making Mistakes is Important** # # Every programmer at every skill level gets errors in their code. 
Making mistakes is how we all learn to program. Programming is a little like solving a puzzle where the goal is to get the desired outcome through a series of attempts. You won't solve the puzzle if you're afraid to test if the pieces match. An error message will not break your computer. Remember, you can always reload a notebook if it stops working properly or you misplace an important piece of code. Under the edit menu, there is an option to undo changes. (Alternatively, you can use **command z** on Mac and **control z** on Windows.) To learn any skill, you need to be willing to play and experiment. Programming is no different. # # ## Expressions and Operators # # The simplest form of Python programming is an [expression](https://docs.constellate.org/key-terms/#expression) using an [operator](https://docs.constellate.org/key-terms/#operator). An [expression](https://docs.constellate.org/key-terms/#expression) is a simple mathematical statement like: # # > 1 + 1 # # The [operator](https://docs.constellate.org/key-terms/#operator) in this case is `+`, sometimes called "plus" or "addition". Try this operation in the code box below. Remember to click the "Run" button or press Ctrl + Enter (Windows) or shift + return (OS X) on your keyboard to run the code. # Type the expression in this code block. Then run it. # Python can handle a large variety of [expressions](https://docs.constellate.org/key-terms/#expression). Let's try subtraction in the next [code cell](https://docs.constellate.org/key-terms/#code-cell). # Type an expression that uses subtraction in this cell. Then run it. # We can also do multiplication (\*) and division (/). While you may have used an "×" to represent multiplication in grade school, [Python](https://docs.constellate.org/key-terms/#python) uses an asterisk (\*). 
In [Python](https://docs.constellate.org/key-terms/#python),
#
# > 2 × 2
#
# is written as
#
# > 2 * 2
#
# Try a multiplication and a division in the next [code cell](https://docs.constellate.org/key-terms/#code-cell).

# Try a multiplication in this cell. Then try a division.

# What happens if you combine them? What if you combine them with addition and/or subtraction?

# When you run, or **evaluate**, an [expression](https://docs.constellate.org/key-terms/#expression) in [Python](https://docs.constellate.org/key-terms/#python), the order of operations is followed. (In grade school, you may remember learning the shorthand "PEMDAS".) This means that [expressions](https://docs.constellate.org/key-terms/#expression) are evaluated in this order:
#
# 1. Parentheses
# 2. Exponents
# 3. Multiplication and Division (from left to right)
# 4. Addition and Subtraction (from left to right)
#
# [Python](https://docs.constellate.org/key-terms/#python) can evaluate parentheses and exponents, as well as a number of additional [operators](https://docs.constellate.org/key-terms/#operator) you may not have learned in grade school. Here are the main [operators](https://docs.constellate.org/key-terms/#operator) that you might use presented in the order they are evaluated:
#
# |Operator| Operation| Example | Evaluation |
# |---|----|---|---|
# |\*\*| Exponent/Power| 3 ** 3 | 27 |
# |%| Modulus/Remainder| 34 % 6 | 4 |
# |/| Division | 30 / 6 | 5.0 (division always returns a float)|
# |\*| Multiplication | 7 * 8 | 56 |
# |-| Subtraction | 18 - 4| 14|
# |+| Addition | 4 + 3 | 7 |

# Try operations in this code cell.

# What happens when you add in parentheses?

# + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true
# ## Data Types (Integers, Floats, and Strings)
#
# All [expressions](https://docs.constellate.org/key-terms/#expression) evaluate to a single value. In the above examples, our [expressions](https://docs.constellate.org/key-terms/#expression) evaluated to a single numerical value.
Numerical values come in two basic forms: # # * [integer](https://docs.constellate.org/key-terms/#integer) # * [float](https://docs.constellate.org/key-terms/#float) (or floating-point number) # # An [integer](https://docs.constellate.org/key-terms/#integer), what we sometimes call a "whole number", is a number without a decimal point that can be positive or negative. When a value uses a decimal, it is called a [float](https://docs.constellate.org/key-terms/#float) or floating-point number. Two numbers that are mathematically equivalent could be in two different data types. For example, mathematically 5 is equal to 5.0, yet the former is an [integer](https://docs.constellate.org/key-terms/#integer) while the latter is a [float](https://docs.constellate.org/key-terms/#float). # # Of course, [Python](https://docs.constellate.org/key-terms/#python) can also help us manipulate text. A snippet of text in Python is called a [string](https://docs.constellate.org/key-terms/#string). A [string](https://docs.constellate.org/key-terms/#string) can be written with single or double quotes. A [string](https://docs.constellate.org/key-terms/#string) can use letters, spaces, line breaks, and numbers. So 5 is an [integer](https://docs.constellate.org/key-terms/#integer), 5.0 is a [float](https://docs.constellate.org/key-terms/#float), but '5' and '5.0' are [strings](https://docs.constellate.org/key-terms/#string). A [string](https://docs.constellate.org/key-terms/#string) can also be blank, such as ''. # # |Familiar Name | Programming name | Examples | # |---|---|---| # |Whole number|integer| -3, 0, 2, 534| # |Decimal|float | 6.3, -19.23, 5.0, 0.01| # |Text|string| 'Hello world', '1700 butterflies', '', '1823'| # # The distinction between each of these data types may seem unimportant, but [Python](https://docs.constellate.org/key-terms/#python) treats each one differently. 
For example, we can ask [Python](https://docs.constellate.org/key-terms/#python) whether an [integer](https://docs.constellate.org/key-terms/#integer) is equal to a [float](https://docs.constellate.org/key-terms/#float), but we cannot ask whether a [string](https://docs.constellate.org/key-terms/#string) is equal to an [integer](https://docs.constellate.org/key-terms/#integer) or a [float](https://docs.constellate.org/key-terms/#float). # # To evaluate whether two values are equal, we can use two equals signs between them. The expression will evaluate to either `True` or `False`. # - # Run this code cell to determine whether the values are equal 42 == 42.0 # Run this code cell to compare an integer with a string 15 == 'fifteen' # Run this code cell to compare an integer with a string 15 == '15' # When we use the addition [operator](https://docs.constellate.org/key-terms/#operator) on [integers](https://docs.constellate.org/key-terms/#integer) or [floats](https://docs.constellate.org/key-terms/#float), they are added to create a sum. When we use the addition [operator](https://docs.constellate.org/key-terms/#operator) on [strings](https://docs.constellate.org/key-terms/#string), they are combined into a single, longer [string](https://docs.constellate.org/key-terms/#string). This is called [concatenation](https://docs.constellate.org/key-terms/#concatenation). # + # Combine the strings 'Hello' and 'World' # - # Notice that the [strings](https://docs.constellate.org/key-terms/#string) are combined exactly as they are written. There is no space between the [strings](https://docs.constellate.org/key-terms/#string). If we want to include a space, we need to add the space to the end of 'Hello' or the beginning of 'World'. We can also concatenate multiple [strings](https://docs.constellate.org/key-terms/#string). 
# + # Combine three strings # - # When we use addition [operator](https://docs.constellate.org/key-terms/#operator), the values must be all numbers or all [strings](https://docs.constellate.org/key-terms/#string). Combining them will create an error. # Try adding a string to an integer '55' + 23 # Here, we receive the error `can only concatenate str (not "int") to str`. [Python](https://docs.constellate.org/key-terms/#python) assumes we would like to join two [strings](https://docs.constellate.org/key-terms/#string) together, but it does not know how to join a [string](https://docs.constellate.org/key-terms/#string) to an [integer](https://docs.constellate.org/key-terms/#integer). Put another way, [Python](https://docs.constellate.org/key-terms/#python) is unsure if we want: # # >'55' + 23 # # to become # >'5523' # # or # >78 # We *can* multiply a [string](https://docs.constellate.org/key-terms/#string) by an [integer](https://docs.constellate.org/key-terms/#integer). The result is simply the [string](https://docs.constellate.org/key-terms/#string) repeated the appropriate number of times. # + # Multiply a string by an integer # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## Variables # # # - # A [variable](https://docs.constellate.org/key-terms/#variable) is like a container that stores information. There are many kinds of information that can be stored in a [variable](https://docs.constellate.org/key-terms/#variable), including the data types we have already discussed ([integers](https://docs.constellate.org/key-terms/#integer), [floats](https://docs.constellate.org/key-terms/#float), and [string](https://docs.constellate.org/key-terms/#string)). We create (or *initialize*) a [variable](https://docs.constellate.org/key-terms/#variable) with an [assignment statement](https://docs.constellate.org/key-terms/#assignment-statement). 
The [assignment statement](https://docs.constellate.org/key-terms/#assignment-statement) gives the variable an initial value. # Initialize an integer variable and add 22 new_integer_variable = 5 new_integer_variable + 22 # The value of a [variable](https://docs.constellate.org/key-terms/#variable) can be overwritten with a new value. # + # Overwrite the value of my_favorite_number when the commented out line of code is executed. # Remove the # in the line "#my_favorite_number = 9" to turn the line into executable code. my_favorite_number = 7 my_favorite_number = 9 my_favorite_number # - # Overwriting the value of a variable using its original value cats_in_house = 1 cats_in_house = cats_in_house + 2 cats_in_house # Initialize a string variable and concatenate another string new_string_variable = 'Hello ' new_string_variable + 'World!' # You can create a [variable](https://docs.constellate.org/key-terms/#variable) with almost any name, but there are a few guidelines that are recommended. # # ### Variable Names Should be Descriptive # # If we create a [variable](https://docs.constellate.org/key-terms/#variable) that stores the day of the month, it is helpful to give it a name that makes the value stored inside it clear like `day_of_month`. From a logical perspective, we could call the [variable](https://docs.constellate.org/key-terms/#variable) almost anything (`hotdog`, `rabbit`, `flat_tire`). As long as we are consistent, the code will execute the same. When it comes time to read, modify, and understand the code, however, it will be confusing to you and others. Consider this simple program that lets us change the `days` [variable](https://docs.constellate.org/key-terms/#variable) to compute the number of seconds in that many days. 
# + # Compute the number of seconds in 3 days days = 3 hours_in_day = 24 minutes_in_hour = 60 seconds_in_minute = 60 days * hours_in_day * minutes_in_hour * seconds_in_minute # - # We could write a program that is logically the same, but uses confusing [variable](https://docs.constellate.org/key-terms/#variable) names. # + hotdogs = 60 sasquatch = 24 example = 3 answer = 60 answer * sasquatch * example * hotdogs # - # This code gives us the same answer as the first example, but it is confusing. Not only does this code use [variable](https://docs.constellate.org/key-terms/#variable) names that are confusing, it also does not include any comments to explain what the code does. It is not clear that we would change `example` to set a different number of days. It is not even clear what the purpose of the code is. As code gets longer and more complex, having clear [variable](https://docs.constellate.org/key-terms/#variable) names and explanatory comments is very important. # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ### Variable Naming Rules # # In addition to being descriptive, [variable](https://docs.constellate.org/key-terms/#variable) names must follow 3 basic rules: # # 1. Must be one word (no spaces allowed) # 2. Only letters, numbers and the underscore character (\_) # 3. Cannot begin with a number # # # # + # Which of these variable names are acceptable? # Comment out the variables that are not allowed in Python and run this cell to check if the variable assignment works. # If you get an error, the variable name is not allowed in Python. $variable = 1 a variable = 2 a_variable = 3 4variable = 4 variable5 = 5 variable-6 = 6 variAble = 7 Avariable = 8 # - # ### Variable Naming Style Guidelines # # The three rules above describe absolute rules of [Python](https://docs.constellate.org/key-terms/#python) [variable](https://docs.constellate.org/key-terms/#variable) naming. 
If you break those rules, your code will create an error and fail to execute properly. There are also style *guidelines* that, while they won't break your code, are generally advised for making your code readable and understandable. These style guidelines are written in the [Python Enhancement Proposals (PEP) Style Guide](https://www.python.org/dev/peps/pep-0008/). # # The current version of the style guide advises that [variable](https://docs.constellate.org/key-terms/#variable) names should be written: # >lowercase, with words separated by underscores as necessary to improve readability. # # If you have written code before, you may be familiar with other styles, but these notebooks will attempt to follow the PEP guidelines for style. Ultimately, the most important thing is that your [variable](https://docs.constellate.org/key-terms/#variable) names are consistent so that someone who reads your code can follow what it is doing. As your code becomes more complicated, writing detailed comments with `#` will also become more important. # + [markdown] toc-hr-collapsed=true toc-nb-collapsed=true # ## Functions # # Many different kinds of programs often need to do very similar operations. Instead of writing the same code over again, you can use a [function](https://docs.constellate.org/key-terms/#function). Essentially, a [function](https://docs.constellate.org/key-terms/#function) is a small snippet of code that can be quickly referenced. 
There are three kinds of [functions](https://docs.constellate.org/key-terms/#function): # # * Native [functions](https://docs.constellate.org/key-terms/#function) built into [Python](https://docs.constellate.org/key-terms/#python) # * [Functions](https://docs.constellate.org/key-terms/#function) others have written that you can import # * [Functions](https://docs.constellate.org/key-terms/#function) you write yourself # # We'll address [functions](https://docs.constellate.org/key-terms/#function) you write yourself in *Python Basics 2*. For now, let's look at a few of the native [functions](https://docs.constellate.org/key-terms/#function). One of the most common [functions](https://docs.constellate.org/key-terms/#function) used in [Python](https://docs.constellate.org/key-terms/#python) is the `print()` [function](https://docs.constellate.org/key-terms/#function) which simply prints a [string](https://docs.constellate.org/key-terms/#string). # - # A print function that prints: Hello World! print('Hello World!') # We could also define a [variable](https://docs.constellate.org/key-terms/#variable) with our [string](https://docs.constellate.org/key-terms/#string) ```'Hello World!'``` and then pass that [variable](https://docs.constellate.org/key-terms/#variable) into the `print()` function. It is common for functions to take an input, called an [argument](https://docs.constellate.org/key-terms/#argument), that is placed inside the parentheses (). # Define a string and then print it our_string = 'Hello World!' print(our_string) # There is also an `input()` [function](https://docs.constellate.org/key-terms/#function) for taking user input. # A program to greet the user by name print('Hi. 
What is your name?') # Ask the user for their name

user_name = input() # Take the user's input and put it into the variable user_name

print('Pleased to meet you, ' + user_name) # Print a greeting with the user's name

# We defined a [string](https://docs.constellate.org/key-terms/#string) [variable](https://docs.constellate.org/key-terms/#variable) ```user_name``` to hold the user's input. We then called the `print()` [function](https://docs.constellate.org/key-terms/#function) to print the [concatenation](https://docs.constellate.org/key-terms/#concatenate) of 'Pleased to meet you, ' and the user's input that was captured in the [variable](https://docs.constellate.org/key-terms/#variable) ```user_name```. Remember that we can use a ```+``` to [concatenate](https://docs.constellate.org/key-terms/#concatenate), meaning join these [strings](https://docs.constellate.org/key-terms/#string) together.
#
# Here are a couple more tricks we can use. You can pass a string variable into the `input` function as a prompt, and you can use an `f string` to add the variable into the print string without using the `+` operator to concatenate both strings.

# A program to greet the user by name
user_name = input('Hi. What is your name? ')
print(f'Pleased to meet you, {user_name}')

# We can [concatenate](https://docs.constellate.org/key-terms/#concatenate) many [strings](https://docs.constellate.org/key-terms/#string) together, but we cannot [concatenate](https://docs.constellate.org/key-terms/#concatenate) [strings](https://docs.constellate.org/key-terms/#string) with [integers](https://docs.constellate.org/key-terms/#integer) or [floats](https://docs.constellate.org/key-terms/#float).
# Concatenating many strings within a print function print('Hello, ' + 'all ' + 'these ' + 'strings ' + 'are ' + 'being ' + 'connected ' + 'together.') # Trying to concatenate a string with an integer causes an error print('There are ' + 7 + 'continents.') # We can transform one [variable](https://docs.constellate.org/key-terms/#variable) type into another [variable](https://docs.constellate.org/key-terms/#variable) type with the `str()`, `int()`, and `float()` [functions](https://docs.constellate.org/key-terms/#function). Let's convert the [integer](https://docs.constellate.org/key-terms/#integer) above into a [string](https://docs.constellate.org/key-terms/#string) so we can [concatenate](https://docs.constellate.org/key-terms/#concatenate) it. print('There are ' + str(7) + ' continents.') # Mixing [strings](https://docs.constellate.org/key-terms/#string) with [floats](https://docs.constellate.org/key-terms/#float) and [integers](https://docs.constellate.org/key-terms/#integer) can have unexpected results. See if you can spot the problem with the program below. # + # A program to tell a user how many months old they are user_age = input('How old are you? ') # Take the user input and put it into the variable user_age number_of_months = user_age * 12 # Define a new variable number_of_months that multiplies the user's age by 12 print('That is more than ' + number_of_months + ' months old!' ) # Print a response that tells the user they are at least number_of_months old # - # In order to compute the [variable](https://docs.constellate.org/key-terms/#variable) ```number_of_months```, we multiply ```user_age``` by 12. The problem is that ```user_age``` is a [string](https://docs.constellate.org/key-terms/#string). Multiplying a [string](https://docs.constellate.org/key-terms/#string) by 12 simply makes the string repeat 12 times. After the user gives us their age, we need that input to be converted to an [integer](https://docs.constellate.org/key-terms/#integer). 
Can you fix the program?

# + [markdown] toc-hr-collapsed=true
# ___
# ## Lesson Complete
#
# Congratulations! You have completed *Python Basics* 1. There are two more lessons in *Python Basics*:
#
# * *Python Basics 2*
# * *Python Basics 3*
#
# ### Python Basics 1 Quiz
#
# If you would like to check your understanding of this lesson, you can [take this quick quiz](https://docs.google.com/forms/d/e/1FAIpQLSdTPq_BotRY_eqJIfIXT2OoWkv9MwgOKngWcTYJfilwbQ6eAQ/viewform?usp=sf_link).
#
# ### Start Next Lesson: [Python Basics 2](./python-basics-2.ipynb)
# -
python-basics-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Install Seaborn.
# If you have Python and PIP already installed on a system, install it using this command:
#
# #### C:\Users\Your Name>pip install seaborn

# If you use Jupyter, install Seaborn using this command:
#
# #### C:\Users\Your Name>!pip install seaborn

import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns

# # Distplots
# Distplot stands for distribution plot, it takes as input an array and plots a curve corresponding to the distribution of points in the array.

# FIX: seaborn.distplot() was deprecated in seaborn 0.11 and removed in 0.14.
# histplot(..., kde=True) is the modern replacement for the default distplot
# view (a histogram with a smoothed distribution curve overlaid).
sns.histplot([0, 1, 2, 3, 4, 5], kde=True)
plt.show()

# ### Plotting a Distplot Without the Histogram

# kdeplot() draws only the smoothed distribution curve, which is what the
# old distplot(..., hist=False) call produced.
sns.kdeplot([0, 1, 2, 3, 4, 5])
plt.show()
Python Library/NumPy/NumPy_Random/Seaborn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/suyash091/PIMA-INDIAN-DIABETES/blob/master/Diabetes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="2ihxoPWI0EOq" colab_type="text" # #**IMPORTING THE DATASET** # # + id="1fUMKp8g0W0n" colab_type="code" outputId="88e060e4-359c-<KEY>" colab={"base_uri": "https://localhost:8080/", "height": 68} import os os.environ['KAGGLE_USERNAME'] = "suyash091" os.environ['KAGGLE_KEY'] = "9775b0a6da90b12a0c284d71e498de43" # #!kaggle datasets download -d sulianova/cardiovascular-disease-dataset #CardioVascular # !kaggle datasets download -d uciml/pima-indians-diabetes-database # + [markdown] id="2TpZrUeO49jP" colab_type="text" # **Extracting zipfile** # + id="6DlGlCnZ5GE3" colab_type="code" colab={} # #!unzip -q '/content/cardiovascular-disease-dataset.zip' # !unzip -q '/content/pima-indians-diabetes-database.zip' # + id="MCOahQPn5n0c" colab_type="code" colab={} import pandas as pd # + [markdown] id="6O_kGB9B5Veb" colab_type="text" # **Data Preprocessing** # + id="xY2b03vQ5azX" colab_type="code" colab={} #Fixing structure # Read in the file with open('/content/ALF_Data.csv', 'r') as file : filedata = file.read() # Replace the target string filedata = filedata.replace(';', ',') # Write the file out again with open('/content/ALF_Data.csv', 'w') as file: file.write(filedata) # + id="EtxdlDft67IJ" colab_type="code" outputId="3d5dc8db-6e2f-4b21-ceb5-493a94ce0b5a" colab={"base_uri": "https://localhost:8080/", "height": 317} #Load Data cvd=pd.read_csv('/content/diabetes.csv').dropna()#.apply(lambda x: x/x.max(), axis=0) cvd.describe() # + id="Bg2pN04nBhi2" colab_type="code" 
outputId="23816b16-eb14-4fb9-b3dc-2806f40cfa96" colab={"base_uri": "https://localhost:8080/", "height": 204}
cvd.head()

# + id="stv-zpQGE9KL" colab_type="code" colab={}
#For K-fold cross validation
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100)
# Target column and the eight PIMA feature columns used as predictors.
outcome_var = ['Outcome']
predictor_var = ['Pregnancies', 'Glucose', 'BloodPressure','SkinThickness','Insulin','BMI', 'DiabetesPedigreeFunction','Age']

# + id="fC72kyLMGxWG" colab_type="code" colab={}
#Import models from scikit learn module:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import metrics
import numpy as np

#Generic function for making a classification model and accessing performance:
def classification_model(model, data, predictors, outcome):
    """Fit `model` on data[predictors] -> data[outcome] and report performance.

    Prints accuracy on the training data itself (optimistic, since the model
    is scored on the same rows it was fitted on), then a 5-fold
    cross-validation score, and finally returns the model refitted on the
    full dataset so it is ready for prediction.
    """
    # Fit on the full dataset and report (training-set) accuracy.
    model.fit(data[predictors],data[outcome].values.ravel())
    predictions = model.predict(data[predictors])
    accuracy = metrics.accuracy_score(predictions,data[outcome])
    print('Accuracy : %s' % '{0:.3%}'.format(accuracy))

    # 5-fold cross-validation: refit on each training split, score the
    # held-out split, and average the per-fold scores.
    kf = KFold(n_splits=5)
    error = []
    for train, test in kf.split(data[predictors]):
        train_predictors = (data[predictors].iloc[train,:])
        train_target = data[outcome].iloc[train]
        model.fit(train_predictors, train_target.values.ravel())
        error.append(model.score(data[predictors].iloc[test,:], data[outcome].iloc[test]))
    print('Cross-Validation Score : %s' % '{0:.3%}'.format(np.mean(error)))

    # The CV loop leaves the model fitted on the last fold only, so refit on
    # the full dataset before returning (sklearn's fit() returns the model).
    return model.fit(data[predictors],data[outcome].values.ravel())

# + id="kLLxPloyGvws" colab_type="code" outputId="a3cc998e-843f-42e0-9137-b97089fbb588" colab={"base_uri": "https://localhost:8080/", "height": 51}
#outcome_var = ['winner']
#predictor_var = ['team1', 'team2', 'venue', 'toss_winner','city','toss_decision']
history=classification_model(model, cvd,predictor_var,outcome_var)

# + id="QcPor5e7W3IZ" colab_type="code" colab={}
#inp=list(map(str,[18393, 2, 168, 62.0, 110, 80, 1, 1, 0, 0, 1]))
# Predict for a single hand-typed patient record.
# BUG FIX: the original row here had 13 values (a heart-disease record), but
# this model was trained on the 8 PIMA diabetes features, so predict() would
# raise a ValueError. Use an 8-value diabetes record instead, and convert to
# float (not str) so the feature matrix is numeric.
inp=list(map(float,'6 148 72 35 0 33.6 0.627 50'.split()))
inp = np.array(inp).reshape((1, -1)) # one sample, eight features
output=model.predict(inp)

# + id="KaxmUXeuXO1v" colab_type="code" outputId="a525aeb3-7de7-4190-e444-c9822e6243cb" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(output)

# + id="TTdumrCPUqZQ" colab_type="code" outputId="f6561570-8b33-4688-e9b4-5affac064287" colab={"base_uri": "https://localhost:8080/", "height": 34}
# FIX: `from sklearn.externals import joblib` was deprecated in scikit-learn
# 0.21 and removed in 0.23; joblib is now its own package (installed
# alongside scikit-learn).
import joblib
joblib.dump(model, 'acuteliverfailure.pkl')

# + id="gzux2uE6tFOu" colab_type="code" colab={}
# Persist the model with pickle as well; use context managers so the file
# handles are closed deterministically (the originals were never closed).
import pickle
filename = 'Diabetes.sav'
with open(filename, 'wb') as f:
    pickle.dump(model, f)

# + id="Or1ZExjKtYUP" colab_type="code" colab={}
with open(filename, 'rb') as f:
    loaded_model = pickle.load(f)

# + id="pZdLMgFjtlfl" colab_type="code" colab={}

# + id="inI5dpcUkSKg" colab_type="code" colab={}
import joblib  # FIX: was `from sklearn.externals import joblib` (removed API)
import pandas as pd
import numpy as np
#clf = joblib.load('Cardiomodel.pkl')
#inp=list(map(str,[18393, 2, 168, 62.0, 110, 80, 1, 1, 0, 0, 1]))
# An 8-feature PIMA record: Pregnancies, Glucose, BloodPressure,
# SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age.
inp=list(map(float,'6 148 72 35 0 33.6 0.627 50'.split()))
inp = np.array(inp).reshape((1, -1))
result = loaded_model.predict(inp)

# + id="Ilzud-FXkcGy" colab_type="code" outputId="443ac49d-f679-49ea-91e1-e8a407e2a6c6" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(result)

# + id="x1stJI12uSGX" colab_type="code" outputId="151cc479-ff99-4a8e-9532-80f719d47074" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')

# + id="WG9_gIHounnI" colab_type="code" colab={}
# !mv '/content/Diabetes.sav' '/content/drive/My Drive/rackathon/Diabetes.sav'
Diabetes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 from nb_002 import * # - DATA_PATH = Path('../data') PATH = DATA_PATH/'imagenet' # Test of all the different possiblities for a pipeline on imagenet including: # - resizing the image so that the lower dimension is 224 # - random rotate -10 to 10 degrees # - random scale 0.9 to 1.1 # - random flip # - random crop # # Test on the first 100 batches of imagenet (with shuffle=False) # # Torchvision import torchvision class TVFilesDataset(Dataset): def __init__(self, folder, tfms): cls_dirs = find_classes(folder) self.fns, self.y = [], [] self.classes = [cls.name for cls in cls_dirs] for i, cls_dir in enumerate(cls_dirs): fnames = get_image_files(cls_dir) self.fns += fnames self.y += [i] * len(fnames) self.tfms = torchvision.transforms.Compose(tfms) def __len__(self): return len(self.fns) def __getitem__(self,i): x = Image.open(self.fns[i]).convert('RGB') x = self.tfms(x) return x,self.y[i] class DeviceDataLoader(): def __init__(self, dl, device, stats): self.dl,self.device = dl,device self.m, self.s = map(lambda x:torch.tensor(x, dtype=torch.float32, device=device), stats) def __iter__(self): for b in self.dl: x, y = b[0].to(self.device),b[1].to(self.device) x = (x - self.m[None,:,None,None]) / self.s[None,:,None,None] yield x,y def __len__(self): return (len(self.dl)) def get_dataloader(ds, bs, shuffle, stats, device = None, sampler=None): if device is None: device = default_device dl = DataLoader(ds, batch_size=bs, shuffle=shuffle,num_workers=8, sampler=sampler, pin_memory=True) return DeviceDataLoader(dl, device, stats) sz, bs = 224, 192 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [torchvision.transforms.RandomRotation(10), 
torchvision.transforms.RandomResizedCrop(sz, scale=(0.5, 1.0), ratio=(1.,1.)), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor()] train_ds = TVFilesDataset(PATH/'train', train_tfms) default_device = default_device = torch.device('cuda', 0) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # 37.3s # # Pipeline with grid_sampler # Needs the PR https://github.com/pytorch/pytorch/pull/9961/files to make grid_sample fast. # + def dict_groupby(iterable, key=None): return {k:list(v) for k,v in itertools.groupby(sorted(iterable, key=key), key=key)} def resolve_pipeline(tfms, **kwargs): tfms = listify(tfms) if len(tfms)==0: return noop grouped_tfms = dict_groupby(tfms, lambda o: o.__annotations__['return']) lighting_tfms,coord_tfms,affine_tfms,pixel_tfms,final_tfms = map(grouped_tfms.get, TfmType) lighting_tfm = apply_lighting_tfms(lighting_tfms) affine_tfm = compose_affine_tfms(affine_tfms, funcs=coord_tfms, **kwargs) pixel_tfm = compose_tfms(pixel_tfms) final_tfm = compose_tfms(final_tfms) return lambda x,**k: final_tfm(affine_tfm(lighting_tfm(pixel_tfm(x)), **k)) # - class TransformedFilesDataset(Dataset): def __init__(self, folder, sz, tfms=None, classes=None): self.fns, self.y = [], [] if classes is None: classes = [cls.name for cls in find_classes(folder)] self.classes = classes for i, cls in enumerate(classes): fnames = get_image_files(folder/cls) self.fns += fnames self.y += [i] * len(fnames) self.sz, self.tfms = sz, tfms def __len__(self): return len(self.fns) def __getitem__(self,i): x = PIL.Image.open(self.fns[i]).convert('RGB') x = pil2tensor(x) if self.tfms is not None: x = resolve_pipeline(self.tfms, size=self.sz)(x) return x,self.y[i] sz, bs = 224, 64 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [flip_lr_tfm(p=0.5), zoom_tfm(scale=(0.9,1.1),p=0.75), 
rotate_tfm(degrees=(-10,10.),p=0.75), crop_tfm(size=sz)] train_ds = TransformedFilesDataset(PATH/'train', sz, train_tfms) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # 31.5s # Now without affine augmentation sz, bs = 224, 64 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [flip_lr_tfm(p=0.5), #zoom_tfm(scale=(0.9,1.1),p=0.75), #rotate_tfm(degrees=(-10,10.),p=0.75), crop_tfm(size=sz)] train_ds = TransformedFilesDataset(PATH/'train', sz, train_tfms) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # 28s # # With PIL from PIL import Image class TransformedFilesDataset(Dataset): def __init__(self, folder, sz, tfms=None, classes=None): self.fns, self.y = [], [] if classes is None: classes = [cls.name for cls in find_classes(folder)] self.classes = classes for i, cls in enumerate(classes): fnames = get_image_files(folder/cls) self.fns += fnames self.y += [i] * len(fnames) self.sz, self.tfms = sz, tfms def __len__(self): return len(self.fns) def __getitem__(self,i): x = PIL.Image.open(self.fns[i]).convert('RGB') w,h = x.size if w < h: w,h = self.sz,int(self.sz * h / w) else: w,h = int(self.sz * w / h),self.sz theta = random.uniform(-10,10) * math.pi / 180 if random.random() < 0.75 else 0 scale = random.uniform(0.9,1.1) if random.random() < 0.75 else 1 x = x.transform((w,h), Image.AFFINE, (cos(theta)/scale, -sin(theta), 0, sin(theta), cos(theta)/scale, 0), Image.BILINEAR) x = pil2tensor(x) if self.tfms is not None: x = resolve_pipeline(self.tfms)(x) return x,self.y[i] sz, bs = 224, 64 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [flip_lr_tfm(p=0.5), #zoom_tfm(scale=(0.9,1.1),p=0.75), Those are done in the dataset #rotate_tfm(degrees=(-10,10.),p=0.75), crop_tfm(size=sz)] train_ds = 
TransformedFilesDataset(PATH/'train', sz, train_tfms) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # 15.8s ^^ # Just PIL resize class TransformedFilesDataset(Dataset): def __init__(self, folder, sz, tfms=None, classes=None): self.fns, self.y = [], [] if classes is None: classes = [cls.name for cls in find_classes(folder)] self.classes = classes for i, cls in enumerate(classes): fnames = get_image_files(folder/cls) self.fns += fnames self.y += [i] * len(fnames) self.sz, self.tfms = sz, tfms def __len__(self): return len(self.fns) def __getitem__(self,i): x = PIL.Image.open(self.fns[i]).convert('RGB') w,h = x.size if w < h: w,h = self.sz,int(self.sz * h / w) else: w,h = int(self.sz * w / h),self.sz x = x.resize((w,h)) x = pil2tensor(x) x = F.interpolate(x[None], size=(self.sz,self.sz),mode='bilinear') if self.tfms is not None: x = resolve_pipeline(self.tfms)(x[0]) return x,self.y[i] sz, bs = 224, 64 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [flip_lr_tfm(p=0.5), #zoom_tfm(scale=(0.9,1.1),p=0.75), #rotate_tfm(degrees=(-10,10.),p=0.75), crop_tfm(size=sz)] train_ds = TransformedFilesDataset(PATH/'train', sz, train_tfms) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # 11.6s # # Just F.interpolate class TransformedFilesDataset(Dataset): def __init__(self, folder, sz, tfms=None, classes=None): self.fns, self.y = [], [] if classes is None: classes = [cls.name for cls in find_classes(folder)] self.classes = classes for i, cls in enumerate(classes): fnames = get_image_files(folder/cls) self.fns += fnames self.y += [i] * len(fnames) self.sz, self.tfms = sz, tfms def __len__(self): return len(self.fns) def __getitem__(self,i): x = PIL.Image.open(self.fns[i]).convert('RGB') x = pil2tensor(x) x = F.interpolate(x[None], 
size=(self.sz,self.sz),mode='bilinear') if self.tfms is not None: x = resolve_pipeline(self.tfms)(x[0]) return x,self.y[i] sz, bs = 224, 64 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [flip_lr_tfm(p=0.5), #zoom_tfm(scale=(0.9,1.1),p=0.75), #rotate_tfm(degrees=(-10,10.),p=0.75), crop_tfm(size=sz)] train_ds = TransformedFilesDataset(PATH/'train', sz, train_tfms) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # 13.3s # # On the GPU # grid_sample used to be faster on the GPU so let's try to do that step there by moving the image on the GPU during the affine transformation. def do_affine(img, m=None, func=None, size=None, **kwargs): img = img.cuda(non_blocking=True) if size is None: size = img.size() elif isinstance(size, int): if img.size(1) < img.size(2): size = (img.size(0),size,int(img.size(2)*size/img.size(1))) else: size = (img.size(0),int(img.size(1)*size/img.size(2)),size) if m is None: if img.shape==size: return img else: m=eye_new(img, 3) m = m.cuda(non_blocking=True) c = affine_grid(img, img.new_tensor(m), size=size) if func is not None: c = func(c) return grid_sample(img, c, **kwargs) # + def dict_groupby(iterable, key=None): return {k:list(v) for k,v in itertools.groupby(sorted(iterable, key=key), key=key)} def resolve_pipeline(tfms, **kwargs): tfms = listify(tfms) if len(tfms)==0: return noop grouped_tfms = dict_groupby(tfms, lambda o: o.__annotations__['return']) lighting_tfms,coord_tfms,affine_tfms,pixel_tfms,final_tfms = map(grouped_tfms.get, TfmType) lighting_tfm = apply_lighting_tfms(lighting_tfms) affine_tfm = compose_affine_tfms(affine_tfms, funcs=coord_tfms, **kwargs) pixel_tfm = compose_tfms(pixel_tfms) final_tfm = compose_tfms(final_tfms) return lambda x,**k: final_tfm(affine_tfm(lighting_tfm(pixel_tfm(x)), **k)) # - class TransformedFilesDataset(Dataset): def __init__(self, folder, sz, tfms=None, 
classes=None): self.fns, self.y = [], [] if classes is None: classes = [cls.name for cls in find_classes(folder)] self.classes = classes for i, cls in enumerate(classes): fnames = get_image_files(folder/cls) self.fns += fnames self.y += [i] * len(fnames) self.sz, self.tfms = sz, tfms def __len__(self): return len(self.fns) def __getitem__(self,i): x = PIL.Image.open(self.fns[i]).convert('RGB') x = pil2tensor(x) if self.tfms is not None: x = resolve_pipeline(self.tfms, size=self.sz)(x) return x,self.y[i] sz, bs = 224, 64 stats = (np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225])) train_tfms = [flip_lr_tfm(p=0.5), zoom_tfm(scale=(0.9,1.1),p=0.75), rotate_tfm(degrees=(-10,10.),p=0.75), crop_tfm(size=sz)] class DeviceDataLoader(): def __init__(self, dl, device, stats): self.dl,self.device = dl,device self.m, self.s = map(lambda x:torch.tensor(x, dtype=torch.float32, device=device), stats) def __iter__(self): for b in self.dl: #x, y = b[0].to(self.device),b[1].to(self.device) x = (x - self.m[None,:,None,None]) / self.s[None,:,None,None] yield x,y def __len__(self): return (len(self.dl)) def get_dataloader(ds, bs, shuffle, stats, device = None, sampler=None): if device is None: device = default_device dl = DataLoader(ds, batch_size=bs, shuffle=shuffle,num_workers=8, sampler=sampler, pin_memory=True) return DeviceDataLoader(dl, device, stats) # + class DeviceDataLoader(): def __init__(self, dl, device, stats): self.dl,self.device = dl,device self.m, self.s = map(lambda x:torch.tensor(x, dtype=torch.float32, device=device), stats) def __iter__(self): for b in self.dl: x, y = b[0].to(self.device),b[1].to(self.device) x = (x - self.m[None,:,None,None]) / self.s[None,:,None,None] yield x,y def __len__(self): return (len(self.dl)) def get_dataloader(ds, bs, shuffle, stats, device = None, sampler=None): if device is None: device = default_device dl = DataLoader(ds, batch_size=bs, shuffle=shuffle,num_workers=8, sampler=sampler, pin_memory=True) return 
DeviceDataLoader(dl, device, stats) # - train_ds = TransformedFilesDataset(PATH/'train', sz, train_tfms) train_dl = get_dataloader(train_ds, bs, shuffle=False, stats=stats) train_iter = iter(train_dl) # %time for i in tqdm(range(100)): x,y = next(train_iter) # No significant change from the CPU.
dev_nb/experiments/pipelines_speed_comp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Pandas 简介 # **学习目标:** # * 大致了解 *pandas* 库的 `DataFrame` 和 `Series` 数据结构 # * 存取和处理 `DataFrame` 和 `Series` 中的数据 # * 将 CSV 数据导入 pandas 库的 `DataFrame` # * 对 `DataFrame` 重建索引来随机打乱数据 # [*pandas*](http://pandas.pydata.org/) 是一种列存数据分析 API。 # 它是用于处理和分析输入数据的强大工具,很多机器学习框架都支持将 *pandas* 数据结构作为输入。 # 虽然全方位介绍 *pandas* API 会占据很长篇幅,但它的核心概念非常简单,我们会在下文中进行说明。 # 有关更完整的参考,请访问 [*pandas* 文档网站](http://pandas.pydata.org/pandas-docs/stable/index.html),其中包含丰富的文档和教程资源。 # ## 基本概念 # # 以下行导入了 *pandas* API 并输出了相应的 API 版本: # + from __future__ import print_function import pandas as pd pd.__version__ # - # *pandas* 中的主要数据结构被实现为以下两类: # # * **`DataFrame`**,您可以将它想象成一个关系型数据表格,其中包含多个行和已命名的列。 # * **`Series`**,它是单一列。`DataFrame` 中包含一个或多个 `Series`,每个 `Series` 均有一个名称。 # # 数据框架是用于数据操控的一种常用抽象实现形式。[Spark](https://spark.apache.org/) 和 [R](https://www.r-project.org/about.html) 中也有类似的实现。 # 创建 `Series` 的一种方法是构建 `Series` 对象。例如: pd.Series(['San Francisco', 'San Jose', 'Sacramento']) # 您可以将映射 `string` 列名称的 `dict` 传递到它们各自的 `Series`,从而创建`DataFrame`对象。 # 如果 `Series` 在长度上不一致,系统会用特殊的 [NA/NaN](http://pandas.pydata.org/pandas-docs/stable/missing_data.html) 值填充缺失的值。 # 例如: # + city_names = pd.Series(['San Francisco', 'San Jose', 'Sacramento']) population = pd.Series([852469, 1015785, 485199]) pd.DataFrame({ 'City name': city_names, 'Population': population }) # - # 但是在大多数情况下,您需要将整个文件加载到 `DataFrame` 中。 # 下面的示例加载了一个包含加利福尼亚州住房数据的文件。 # 请运行以下单元格以加载数据,并创建特征定义: california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",") california_housing_dataframe.describe() # 上面的示例使用 `DataFrame.describe` 来显示关于 `DataFrame` 的有趣统计信息。 # 另一个实用函数是 `DataFrame.head`,它显示 `DataFrame` 的前几个记录: california_housing_dataframe.head() # *pandas* 的另一个强大功能是绘制图表。 # 例如,借助 
`DataFrame.hist`,您可以快速了解一个列中值的分布: california_housing_dataframe.hist('housing_median_age') # ## 访问数据 # # 您可以使用熟悉的 Python dict/list 指令访问 `DataFrame` 数据: cities = pd.DataFrame({ 'City name': city_names, 'Population': population }) print(type(cities['City name'])) cities['City name'] print(type(cities['City name'][1])) cities['City name'][1] print(type(cities[0:2])) cities[0:2] # 此外,*pandas* 针对高级[索引和选择](http://pandas.pydata.org/pandas-docs/stable/indexing.html)提供了极其丰富的 API(数量过多,此处无法逐一列出)。 # ## 操控数据 # # 您可以向 `Series` 应用 Python 的基本运算指令。例如: population / 1000. # [NumPy](http://www.numpy.org/) 是一种用于进行科学计算的常用工具包。*pandas* `Series` 可用作大多数 NumPy 函数的参数: # + import numpy as np np.log(population) # - # 对于更复杂的单列转换,您可以使用 `Series.apply`。 # 像 Python [映射函数](https://docs.python.org/2/library/functions.html#map)一样,`Series.apply` 将以参数形式接受 [lambda 函数](https://docs.python.org/2/tutorial/controlflow.html#lambda-expressions),而该函数会应用于每个值。 # # 下面的示例创建了一个指明 `population` 是否超过 100 万的新 `Series`: population.apply(lambda val: val > 1000000) # `DataFrames` 的修改方式也非常简单。例如,以下代码向现有 `DataFrame` 添加了两个 `Series`: cities['Area square miles'] = pd.Series([46.87, 176.53, 97.92]) cities['Population density'] = cities['Population'] / cities['Area square miles'] cities # ## 练习 1 # # 通过添加一个新的布尔值列(当且仅当以下*两项*均为 True 时为 True)修改 `cities` 表格: # # * 城市以圣人命名。 # * 城市面积大于 50 平方英里。 # # **注意:**布尔值 `Series` 是使用“按位”而非传统布尔值“运算符”组合的。例如,执行*逻辑与*时,应使用 `&`,而不是 `and`。 # # **提示:**"San" 在西班牙语中意为 "saint"。 cities['Is wide and has saint name'] = (cities['Area square miles'] > 50) & cities['City name'].apply(lambda name: name.startswith('San')) cities # ## 索引 # `Series` 和 `DataFrame` 对象也定义了 `index` 属性,该属性会向每个 `Series` 项或 `DataFrame` 行赋一个标识符值。 # # 默认情况下,在构造时,*pandas* 会赋可反映源数据顺序的索引值。索引值在创建后是稳定的;也就是说,它们不会因为数据重新排序而发生改变。 city_names.index cities.index # 调用 `DataFrame.reindex` 以手动重新排列各行的顺序。例如,以下方式与按城市名称排序具有相同的效果: cities.reindex([2, 0, 1]) # 重建索引是一种随机排列 `DataFrame` 的绝佳方式。 # 在下面的示例中,我们会取用类似数组的索引,然后将其传递至 NumPy 的 `random.permutation` 函数,该函数会随机排列其值的位置。 
# 如果使用此重新随机排列的数组调用 `reindex`,会导致 `DataFrame` 行以同样的方式随机排列。 # 尝试多次运行以下单元格! cities.reindex(np.random.permutation(cities.index)) # ## 练习 2 # # `reindex` 方法允许使用未包含在原始 `DataFrame` 索引值中的索引值。 # 请试一下,看看如果使用此类值会发生什么!您认为允许此类值的原因是什么? # 如果您的 `reindex` 输入数组包含原始 `DataFrame` 索引值中没有的值,`reindex` 会为此类“丢失的”索引添加新行,并在所有对应列中填充 `NaN` 值: cities.reindex([0, 4, 5, 2]) # 这种行为是可取的,因为索引通常是从实际数据中提取的字符串(请参阅 [*pandas* reindex 文档](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.reindex.html),查看索引值是浏览器名称的示例)。 # # 在这种情况下,如果允许出现“丢失的”索引,您将可以轻松使用外部列表重建索引,因为您不必担心会将输入清理掉。
tensorflow_seedbank/intro_to_pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This example computes the annual cycle for daily SST data in the NINO 3.4 region using the first two harmonics. It then computes the SST anomalies using the smoothed annual cycle and then calculates the covariance matrix in preparation for EOF analysis. # Import Python Libraries import os import numpy as np import numpy.ma as ma import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd from IPython.display import Image, display import scipy.io as sio import xarray as xr import matplotlib.dates as mdates from eofs.xarray import Eof import cartopy.crs as ccrs import cartopy.feature as cfeature from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import matplotlib.ticker as mticker # 1. Read the data and rearrange the longitudes so 180 is in the center of the grid. filename_pattern = '/home/voyager-sbarc/arc/sst/hadisst/HadISST_sst.nc' ds = xr.open_dataset(filename_pattern) ds ds_rolled = ds.roll(longitude=180, roll_coords=True) ds_rolled # 2. Crop the data to 30S-30N. ds_subset = ds.sel(latitude=slice(30,-30)) ds_subset # 3. Compute the annual cycle of SST fitting the first and second harmonics. climatology = ds_subset.groupby('time.month').mean('time') climatology # Create 1st two harmonics and add together # + def fourier(ds,p): time = ds.month n = len(time) ds,time = xr.broadcast(ds,time) f = 2.*np.pi*p/n ft = f*time sum_a = ds*np.cos(ft - 1.) sum_b = ds*np.sin(ft - 1.) coef_a = (2./n)*sum_a.sum('month',skipna=True) coef_b = (2./n)*sum_b.sum('month',skipna=True) return ft,coef_a, coef_b def harmonic(ds): a0 = ds.mean('month',skipna=True) #-First Harmonic p = 1 ft,coef_a,coef_b = fourier(ds,p) harm1 = a0 + coef_a*np.cos(ft-1.) + coef_b*np.sin(ft-1.) 
#-Second Harmonic p = 2 ft,coef_a,coef_b = fourier(ds,p) harm2 = a0 + coef_a*np.cos(ft-1.) + coef_b*np.sin(ft-1.) #-First plus second combo = harm1 + coef_a*np.cos(ft-1.) + coef_b*np.sin(ft-1.) return combo # - annual_cycle = harmonic(climatology) annual_cycle # 4. Remove the annual cycle from the monthly SST in each gridpoint anomalies = ds_subset.groupby('time.month') - annual_cycle anomalies.sst.shape # 5. Remove the long-term mean in each gridpoint - already done via harmonics # + long_term_mean = anomalies.sst[:,:,:,0].mean('time', skipna=True) print(long_term_mean.shape) sst = anomalies.sst[:,:,:,0] - long_term_mean # - # 6. Weight the SST anomaly value in each gridpoint by the square of the cosine of the latitude # Weight the data set according to weighting argument. coslat = np.cos(np.deg2rad(sst.coords['latitude'].values)) wgts = np.sqrt(coslat) wgts = xr.DataArray(wgts, coords=[sst.latitude], dims=['latitude']) sst,weights = xr.broadcast(sst,wgts) weighted_data = sst * weights print(weighted_data.shape) print(weights) # 7. Plot the standard deviation of the SST anomalies std_dev = np.std(weighted_data, axis=0) std_rolled = std_dev.roll(longitude=180, roll_coords=True) plt.figure(figsize=[16., 4.]) ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=-179.5)) ax.set_extent([sst.longitude.min(), sst.longitude.max(), sst.latitude.min(), sst.latitude.max()], crs=ccrs.PlateCarree(central_longitude=-179.5)) cf = plt.contourf(sst.longitude, sst.latitude, std_dev, transform=ccrs.PlateCarree()) plt.title('Standard Deviation of SST Anomalies') plt.colorbar(cf, orientation='horizontal') ax.coastlines() plt.show() # 8a. The SST data consist of 1787 points in time by 21600 points in space. # + [numtimes, numlats, numlons] = weighted_data.shape ## flatten lat and lon into 1 spatial location identifier X = np.reshape(weighted_data.values, [numtimes, numlons*numlats]) X.shape # - # Find the indices of values that are not missing in one row. 
All the # rows will have missing values in the same places provided the # array was centered. If it wasn't then it is possible that some # missing values will be missed and the singular value decomposition # will produce not a number for everything. nonMissingIndex = np.where(np.logical_not(np.isnan(X[0])))[0] # Remove missing values from the design matrix. dataNoMissing = X[:, nonMissingIndex] print(dataNoMissing.shape) # 8b. Compute the covariance matrix of SST anomalies using the time dimension ## Compute Covariance Matrix numtimes, numnonMissing = dataNoMissing.shape A = np.dot(dataNoMissing,np.transpose(dataNoMissing))/numnonMissing A.shape plt.contourf(A) plt.colorbar() plt.title('Covariance Matrix') A, Lh, E = np.linalg.svd(A) print(A.shape) print(Lh.shape) print(E.shape)
advanced_examples/calculate_covariance_matrix.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp workflows # - # # workflows # > Execute workflows on Server #hide from yx_motor.tests.utils.unit_test_helpers import ( pickle_object, unpickle_object, workflow_test_pickles ) #hide # just removing the insecure warning for now # TODO: Secure requests and remove this code import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # + # export from typing import List import requests from yx_motor.api import API from yx_motor.files import Files from yx_motor.jobs import Jobs class Workflows: "Class for workflow-related API actions" def __init__(self, api: API): self.api = api self.base_endpoint = "workflows/" self.files = Files(self.api) self.jobs = Jobs(self.api) @staticmethod def build_schedule_payload(schedule_name: str, asset_id: str): return {"schedule": {"name": schedule_name, "assetId": asset_id,}} def get_questions(self, asset_id: str): """Return question payload for an asset of type analytic app.""" response = self.api.post(url=f"workflows/{asset_id}/questions") return response def get_vfs_inputs(self, asset_id: str): """Return the list of vfs inputs for a given workflow asset.""" response = self.api.get(url=f"workflows/{asset_id}/vfsInputs") return response def update_vfs_inputs(self, asset_id: str, tool_id: str): pass def get_workflow_dependencies(self, asset_id: str): """Return the list of asset dependencies for a given workflow asset""" response = self.api.get(url=f"workflows/{asset_id}/dependencies") return response def update_workflow_dependencies(self, asset_id: str): pass def add_workflow_dependency(self, asset_id: str): pass def add_workflow_dependency_connection(self, asset_id: str, tool_id: str): pass def run_workflow( self, asset_id: str, schedule_name: str, vfs_inputs: List[str] = None ): 
"""Schedule a workflow to be executed instantaneously. Returns a schedule object.""" response = self.api.post( url="workflows/run", json=self.build_schedule_payload(schedule_name, asset_id), ) return response def download_workflow_results(self, schedule_id: str, download_path: str): # TODO: Input validation workflow_job = self.jobs.get_job(params={"scheduleId": schedule_id}) # TODO: Need to check the status of the job, for completion # and/or error state before trying to get outputs. # TODO: Add validation, error handling here output_asset_id = self.get_wf_job_output(workflow_job)[0] response = self.files.download_file( file_uuid=output_asset_id, download_path=download_path ) return response @staticmethod def get_wf_job_output(wf_job: object): jobs_object = wf_job.json()["jobs"] job = jobs_object[0] outputs = job["outputs"] output_asset_id_list = [file["assetId"] for file in outputs] return output_asset_id_list # - from nbdev.showdoc import * show_doc(Workflows.get_vfs_inputs) # **Arguments**: # # - asset_id: Unique VFS identifier for the workflow you want to execute. # + #hide #Unit test code for get_vfs_inputs from unittest.mock import Mock #TODO: write code to use the pickle for a mock response from api mock_response_get_vfs_inputs = unpickle_object( workflow_test_pickles.workflow_get_vfs_inputs_response_pickle_path ) api = Mock() api.get = Mock() api.get.return_value = Mock() api.get().json.return_value = mock_response_get_vfs_inputs # - workflows = Workflows(api) response = workflows.get_vfs_inputs(asset_id='test_id') response.json() from nbdev.showdoc import * show_doc(Workflows.get_workflow_dependencies) # **Arguments**: # # - asset_id: Unique VFS identifier for the workflow you want to execute. # + #hide # Unit test code for get_workflow_dependencies # NOTE: get_vfs_inputs SHOULD be returning the vfs input # along with others, but endpoint isn't complete until API is out of beta. 
# for now, mocking with get_vfs_inputs api = Mock() api.get = Mock() api.get.return_value = Mock() api.get().json.return_value = mock_response_get_vfs_inputs # - workflows = Workflows(api) response = workflows.get_workflow_dependencies(asset_id='test_id') response.json() from nbdev.showdoc import * show_doc(Workflows.run_workflow) # **Arguments**: # # - asset_id: Unique VFS identifier for the workflow you want to execute. # - schedule_name: Optional: Name of the schedule that will be created when workflow execution is triggered. # - vfs_inputs: Optional: List of vfs asset ids, if desired # + #hide from unittest.mock import Mock run_workflow_response_mock = unpickle_object(workflow_test_pickles.workflow_run_response_pickle) api = Mock() api.post = Mock() api.post.return_value = Mock() api.post().json.return_value = run_workflow_response_mock # - workflows = Workflows(api) workflows.run_workflow(asset_id="test_id", schedule_name="jp_client_test").json() from nbdev.showdoc import * show_doc(Workflows.download_workflow_results) # **Arguments**: # # - schedule_id: Unique identifier for the completed schedule that orchestrated the workflow execution # - download_path: Local path to download the scheduled workflow results file to # + #hide #Unit Test Mocks for download_workflow_results workflow_job_response_mock = unpickle_object( workflow_test_pickles.workflow_job_response_pickle_path ) workflow_download_files_response_mock = unpickle_object( workflow_test_pickles.workflow_download_files_response_pickle_path ) # + #hide #Unit test code workflows = Mock() workflows.download_workflow_results = Mock() workflows.download_workflow_results.return_value = workflow_download_files_response_mock # - workflows.download_workflow_results(schedule_id='test_schedule_id', download_path='wf_out_test.csv')
06_workflows.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %matplotlib inline from bigbang.archive import Archive import bigbang.analysis.entity_resolution from bigbang.analysis import repo_loader; import matplotlib.pyplot as plt from matplotlib import animation, colors import pylab import numpy as np import pandas as pd from IPython.display import display # Used to display widgets in the notebook from IPython.display import clear_output # Introduction # ============ # In group efforts, there is sometimes the impression that there are those who work, and those who talk. A naive question to ask is whether or not the people that tend to talk a lot actually get any work done. This is an obviously and purposefully obtuse question with an interesting answer. # # We can use BigBang's newest feature, git data collection, to compare all of the contributors to a project, in this case Scipy, based on their email and git commit activity. The hypothesis in this case was that people who commit a lot will also tend to email a lot, and vice versa, since their involvement in a project would usually require them to do both. This hypothesis was proven to be correct. However, the data reveals many more interesting phenomenon. # Load the raw email and git data url = "http://mail.python.org/pipermail/scipy-dev/" arx = Archive(url,archive_dir="../archives") mailInfo = arx.data repo = repo_loader.get_repo("bigbang") gitInfo = repo.commit_data; # Entity Resolution # ================= # Git and Email data comes from two different datatables. To observe a single person's git and email data, we need a way to identify that person across the two different datatables. 
# # To solve this problem, I wrote an entity resolution client that will parse a Pandas
# dataframe and add a new column to it called "Person-ID" which gives each row an ID that
# represents one unique contributor. A person may go by many names ("<NAME>, <NAME>,
# <NAME>., etc.) and use many different emails. However, this client will read through
# these data tables in one pass and consolidate these identities based on a few strategies.

# NOTE(review): the module is imported above as bigbang.analysis.entity_resolution;
# the previous reference to bigbang.entity_resolution raised AttributeError.
entityResolve = bigbang.analysis.entity_resolution.entityResolve
mailAct = mailInfo.apply(entityResolve, axis=1, args=("From", None))
gitAct = gitInfo.apply(entityResolve, axis=1, args=("Committer Email", "Committer Name"))

# After we've run entity resolution on our dataframes, we split the dataframe into slices
# based on time. So for the entire life-span of the project, we will have NUM_SLICES
# different segments to analyze. We will be able to look at the git and email data up
# until that certain date, which can let us analyze these changes over time.

NUM_SLICES = 1500  # Number of animation frames. More means more loading time

# +
# DataFrame.sort() was removed from pandas; sort_values() returns a sorted copy, so the
# result must be assigned back (the old unassigned .sort() calls discarded their result).
mailAct = mailAct.sort_values("Date")
gitAct = gitAct.sort_values("Time")


def getSlices(df, numSlices):
    """Split df into numSlices cumulative slices.

    Each slice runs from the first row up to a successively later cut-off, so
    slice i contains all activity observed up to that point in time.
    """
    # Integer division: iloc requires integer bounds (float bounds raise on Python 3).
    sliceSize = len(df) // numSlices
    slices = []
    for i in range(1, numSlices + 1):
        start = 0  # every slice starts at the beginning: slices are cumulative
        end = min(i * sliceSize, len(df) - 1)  # make sure we don't go out of bounds
        # NOTE(review): capping at len(df) - 1 means the final row is never included
        # (iloc's end bound is exclusive); preserved here as the original behavior.
        slices.append(df.iloc[start:end])
    return slices


mailSlices = getSlices(mailAct, NUM_SLICES)
gitSlices = getSlices(gitAct, NUM_SLICES)
# -

# Merging Data Tables
# ===================
# Now we want to merge these two tables based on their Person-ID values. Basically, we
# first count how many emails / commits a certain contributor had in a certain slice. We
# then join all the rows with the same Person-ID to each other, so that we have the number
# of emails and the number of commits of each person in one row per person in one
# consolidated dataframe. We then delete all the rows where both of these values aren't
# defined.
# These represent people for whom we have git data but not mail data, or vice versa.

# +
def processSlices(slices):
    """Replace each raw slice, in place, with per-person activity counts.

    Each entry becomes a Series of row counts indexed by Person-ID, sorted
    ascending by count.
    """
    for i in range(len(slices)):
        counts = slices[i].groupby("Person-ID").size()
        # Series.sort() was removed from pandas; sort_values() is the replacement.
        slices[i] = counts.sort_values()


def concatSlices(slicesA, slicesB):
    """Join email counts (slicesA) and commit counts (slicesB) per person.

    Assumes both lists have the same number of slices. First is emails, second
    is commits. Rows where either count is missing (a person who appears in
    only one of the two tables) are dropped.
    """
    ansSlices = []
    for sliceA, sliceB in zip(slicesA, slicesB):
        ans = pd.concat({"Emails": sliceA, "Commits": sliceB}, axis=1)
        ans = ans[pd.notnull(ans["Emails"])]
        ans = ans[pd.notnull(ans["Commits"])]
        ansSlices.append(ans)
    return ansSlices


processSlices(mailSlices)
processSlices(gitSlices)
finalSlices = concatSlices(mailSlices, gitSlices)
# -

# Coloring
# ========
# We now assign a float value [0 --> 1] to each person. This isn't necessary, but can
# let us graph these changes in a scatter plot and give each contributor a unique color
# to differentiate them. This will help us track an individual as their dot travels
# over time.

# +
def idToFloat(personId):
    """Map a Person-ID to a float color value (roughly in [0, 1] for IDs <= 400)."""
    return personId / 400.0


for finalSlice in finalSlices:
    # One color per row; the inner loop no longer shadows the outer loop variable.
    finalSlice["color"] = [idToFloat(personId) for personId in finalSlice.index.values]
# -

# Here we graph our data. Each dot represents a unique contributor's number of emails
# and commits. As you'll notice, the graph is on a log-log scale.

# +
data = finalSlices[len(finalSlices) - 1]  # Will break if there are 0 slices

fig = plt.figure(figsize=(8, 8))
d = data
x = d["Emails"]
y = d["Commits"]
c = d["color"]

ax = plt.axes(xscale='log', yscale='log')
plt.scatter(x, y, c=c, s=75)
plt.ylim(0, 10000)
plt.xlim(0, 10000)
ax.set_xlabel("Emails")
ax.set_ylabel("Commits")
plt.plot([0, 1000], [0, 1000], linewidth=5)
plt.show()
# -

# Animations
# ==========
# Below this point, you'll find the code for generating animations. This can take a
# long time (~30 mins) for a large number of slices. However, the pre-generated videos
# are below.
# # The first video just shows all the contributors over time without unique colors. The second video has a color for each contributor, but also contains a Matplotlib bug where the minimum x and y values for the axes is not followed. # # There is a lot to observe. As to our hypothesis, it's clear that people who email more commit more. In our static graph, we could see many contributors on the x-axis -- people who only email -- but this dynamic graph allows us to see the truth. While it may seem that they're people who only email, the video shows that even these contributors eventually start committing. Most committers don't really get past 10 commits without starting to email the rest of the project, for pretty clear reasons. However, the emailers can "get away with" exclusively emailing for longer, but eventually they too start to commit. In general, not only is there a positive correlation, there's a general trend of everyone edging close to having a stable and relatively equal ratio of commits to emails. 
from IPython.display import YouTubeVideo display(YouTubeVideo('GCcYJBq1Bcc', width=500, height=500)) display(YouTubeVideo('uP-z4jJqxmI', width=500, height=500)) # + fig = plt.figure(figsize=(8, 8)) a = finalSlices[0] print(type(plt)) ax = plt.axes(xscale='log', yscale = 'log') graph, = ax.plot(x ,y, 'o', c='red', alpha=1, markeredgecolor='none') ax.set_xlabel("Emails") ax.set_ylabel("Commits") plt.ylim(0, 10000) plt.xlim(0, 10000) def init(): graph.set_data([],[]); return graph, def animate(i): a = finalSlices[i] x = a["Emails"] y = a["Commits"] graph.set_data(x, y) return graph, anim = animation.FuncAnimation(fig, animate, init_func=init, frames=NUM_SLICES, interval=1, blit=True) anim.save('t1.mp4', fps=15) # + def main(): data = finalSlices first = finalSlices[0] fig = plt.figure(figsize=(8, 8)) d = data x = d[0]["Emails"] y = d[0]["Commits"] c = d[0]["color"] ax = plt.axes(xscale='log', yscale='log') scat = plt.scatter(x, y, c=c, s=100) plt.ylim(0, 10000) plt.xlim(0, 10000) plt.xscale('log') plt.yscale('log') ani = animation.FuncAnimation(fig, update_plot, frames=NUM_SLICES, fargs=(data, scat), blit=True) ani.save('test.mp4', fps=10) #plt.show() def update_plot(i, d, scat): x = d[i]["Emails"] y = d[i]["Commits"] c = d[i]["color"] plt.cla() ax = plt.axes() ax.set_xscale('log') ax.set_yscale('log') scat = plt.scatter(x, y, c=c, s=100) plt.ylim(0, 10000) plt.xlim(0, 10000) plt.xlabel("Emails") plt.ylabel("Commits") return scat, main()
examples/git-analysis/Walkers and Talkers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Practice Markdown # This notebook lets you experiment with producing repeatable technical documentation. Use this notebook to do the following actions: # # * Create a code cell and print a Python Hello World statement: `print("hello world")` # * Create a Markdown Cell and add a Title using `#` and then a statement using plain text. You can looks at the source code for this cell to get an idea of how to use Markdown. # * Take some ideas from the [Master Markdown Github Guide](https://guides.github.com/features/mastering-markdown/) and implement them here. # * Download this notebook as a `.ipynb` file and check into a Github Project. You can [see an example here](https://github.com/noahgift/duke-coursera-ccf-lab1/blob/main/Practice-Markdown.ipynb). # * Open your notebook in Google Colab and save it into the same Github Project. You can [see an example here](https://github.com/noahgift/duke-coursera-ccf-lab1/blob/main/Practice_Markdown_with_colab.ipynb). # print("hello world") # # Practice Markdown # > We're living the future so
Practice-Markdown.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200> # <br></br> # <br></br> # # ## *Data Science Unit 4 Sprint 3 Assignment 2* # # Convolutional Neural Networks (CNNs) # + [markdown] colab_type="text" id="0lfZdD_cp1t5" # # Assignment # # - <a href="#p1">Part 1:</a> Pre-Trained Model # - <a href="#p2">Part 2:</a> Custom CNN Model # - <a href="#p3">Part 3:</a> CNN with Data Augmentation # # # You will apply three different CNN models to a binary image classification model using Keras. Classify images of Mountains (`./data/mountain/*`) and images of forests (`./data/forest/*`). Treat mountains as the positive class (1) and the forest images as the negative (zero). # # |Mountain (+)|Forest (-)| # |---|---| # |![](./data/mountain/art1131.jpg)|![](./data/forest/cdmc317.jpg)| # # The problem is relatively difficult given that the sample is tiny: there are about 350 observations per class. This sample size might be something you can expect when prototyping an image classification problem/solution at work. Get accustomed to evaluating several different possible models. # + [markdown] colab_type="text" id="0lfZdD_cp1t5" # # Pre - Trained Model # <a id="p1"></a> # # Load a pretrained network from Keras, [ResNet50](https://tfhub.dev/google/imagenet/resnet_v1_50/classification/1) - a 50 layer deep network trained to recognize [1000 objects](https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt).
Starting usage: # # ```python # import numpy as np # # from tensorflow.keras.applications.resnet50 import ResNet50 # from tensorflow.keras.preprocessing import image # from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions # # from tensorflow.keras.layers import Dense, GlobalAveragePooling2D # from tensorflow.keras.models import Model # This is the functional API # # resnet = ResNet50(weights='imagenet', include_top=False) # # ``` # # The `include_top` parameter in `ResNet50` will remove the fully connected layers from the ResNet model. The next step is to turn off the training of the ResNet layers. We want to use the learned parameters without updating them in future training passes. # # ```python # for layer in resnet.layers: # layer.trainable = False # ``` # # Using the Keras functional API, we will need to add additional fully connected layers to our model. When we removed the top layers, we removed all previous fully connected layers. In other words, we kept only the feature processing portions of our network. You can experiment with additional layers beyond what's listed here. The `GlobalAveragePooling2D` layer functions as a really fancy flatten function by taking the average of each of the last convolutional layer outputs (which is two dimensional still). # # ```python # x = resnet.output # x = GlobalAveragePooling2D()(x) # This layer is a really fancy flatten # x = Dense(1024, activation='relu')(x) # predictions = Dense(1, activation='sigmoid')(x) # model = Model(resnet.input, predictions) # ``` # # Your assignment is to apply the transfer learning above to classify images of Mountains (`./data/mountain/*`) and images of forests (`./data/forest/*`). Treat mountains as the positive class (1) and the forest images as the negative (zero). # # Steps to complete assignment: # 1. Load in Image Data into numpy arrays (`X`) # 2. Create a `y` for the labels # 3. Train your model with pretrained layers from resnet # 4.
Report your model's accuracy # - # ## Load in Data # # ![skimage-logo](https://scikit-image.org/_static/img/logo.png) # # Check out out [`skimage`](https://scikit-image.org/) for useful functions related to processing the images. In particular checkout the documentation for `skimage.io.imread_collection` and `skimage.transform.resize`. import skimage import numpy as np from skimage.io import imread_collection from skimage.transform import resize # + image_files = ['./data/mountain/*', './data/forest/*'] mountain = np.asarray(imread_collection('./data/mountain/*')) forest = np.asarray(imread_collection('./data/forest/*')) # + X_train = np.append(mountain, forest, axis=0) # X_trian = mountain + forest y_train = [] for _ in mountain: y_train.append(1) for _ in forest: y_train.append(0) y_train = np.array(y_train) # - X_train.shape y_train.shape # ## Instatiate Model # + import numpy as np from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions from tensorflow.keras.layers import Dense, GlobalAveragePooling2D from tensorflow.keras.models import Model # - resnet = ResNet50(weights='imagenet', include_top=False) for layer in resnet.layers: layer.trainable = False # + x = GlobalAveragePooling2D()(resnet.output) x = Dense(1024, activation='relu')(x) predictions = Dense(2, activation='sigmoid')(x) model = Model(resnet.input, predictions) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # - # ## Fit Model model.fit(X_train, y_train, # batch_size=512, epochs=1, verbose=1, ) # # Custom CNN Model # Compile Model # Fit Model # + [markdown] colab_type="text" id="uT3UV3gap9H6" # # Resources and Stretch Goals # # Stretch goals # - Enhance your code to use classes/functions and accept terms to search and classes to look for in recognizing the downloaded images (e.g. 
download images of parties, recognize all that contain balloons) # - Check out [other available pretrained networks](https://tfhub.dev), try some and compare # - Image recognition/classification is somewhat solved, but *relationships* between entities and describing an image is not - check out some of the extended resources (e.g. [Visual Genome](https://visualgenome.org/)) on the topic # - Transfer learning - using images you source yourself, [retrain a classifier](https://www.tensorflow.org/hub/tutorials/image_retraining) with a new category # - (Not CNN related) Use [piexif](https://pypi.org/project/piexif/) to check out the metadata of images passed in to your system - see if they're from a national park! (Note - many images lack GPS metadata, so this won't work in most cases, but still cool) # # Resources # - [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) - influential paper (introduced ResNet) # - [YOLO: Real-Time Object Detection](https://pjreddie.com/darknet/yolo/) - an influential convolution based object detection system, focused on inference speed (for applications to e.g. self driving vehicles) # - [R-CNN, Fast R-CNN, Faster R-CNN, YOLO](https://towardsdatascience.com/r-cnn-fast-r-cnn-faster-r-cnn-yolo-object-detection-algorithms-36d53571365e) - comparison of object detection systems # - [Common Objects in Context](http://cocodataset.org/) - a large-scale object detection, segmentation, and captioning dataset # - [Visual Genome](https://visualgenome.org/) - a dataset, a knowledge base, an ongoing effort to connect structured image concepts to language
module2-convolutional-neural-networks/LS_DS_432_Convolution_Neural_Networks_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ### ASTR-598, Winter 2018, Connolly & Ivezic, University of Washington # https://github.com/dirac-institute/uw-astr598-w18/tree/master/lectures/Week-4-Thu.ipynb # # # ### The objectives of this lecture # - A gentle (re)introduction to MCMC (from a mathematical and intuative perspective) # - An overview of probablistic programming # + [markdown] slideshow={"slide_type": "slide"} # ### For further reading # # These lectures have been adapted from a range of different materials including # - [A Practical Python Guide for the Analysis of Survey Data](http://press.princeton.edu/titles/10159.html) Chapter 5. # - [<NAME>: ``MCMC sampling for dummies by <NAME>"](http://twiecki.github.io/blog/2015/11/10/mcmc-sampling/) # - [Probabilistic Programming and Bayesian Methods for Hackers by <NAME>](http://nbviewer.jupyter.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter1_Introduction/Ch1_Introduction_PyMC2.ipynb) # - [<NAME>'s (UW) discussion of MCMC](https://healthyalgorithms.com/) # - [Astro 324 special topics course at the University of Washington](https://github.com/uw-astr-324-s17/astr-324-s17) # + [markdown] slideshow={"slide_type": "slide"} # ### For those who want to dive deep: # - [Andrieu et al. 
``An Introduction to MCMC for Machine Learning" (includes a few pages of history)"](http://www.cs.princeton.edu/courses/archive/spr06/cos598C/papers/AndrieuFreitasDoucetJordan2003.pdf) # + [markdown] slideshow={"slide_type": "slide"} # ### Software needed # # For the examples we will be using PYMC3 # # - pip install git+https://github.com/pymc-devs/pymc3 # # To demonstrate how MCMC techniques sample we will use Jake Vanderplas's animation code # - pip install JSAnimation # # Aside: PYMC3 should install Theano (a deep learning package) by default as it is used in manny of the definitions of distributions. We will call Theano a couple of times in the later exercises # + [markdown] slideshow={"slide_type": "slide"} # ### Testing it all works # # If you want to ensure that you have everything in place try to run the following piece of code (we will walk through what these lines mean as we go through the class). You should see something like the following figure # # <center><img src="figures/pymc3-gaussian.png" alt="pymc3 test" style="width:600px;"/></center> # # + slideshow={"slide_type": "subslide"} import numpy as np from matplotlib import pyplot as plt from scipy.stats import cauchy import scipy.stats as stats from JSAnimation import IPython_display import pymc3 as pm import theano as thea import theano.tensor as T # %matplotlib inline # + slideshow={"slide_type": "subslide"} #generate a set of data N = 200 mu_0 = 10. sigma_0 = 2. 
y = np.random.normal(loc=mu_0, scale=sigma_0, size=N) #run MCMC nsamples = 10000 with pm.Model() as model: mu = pm.Uniform('mu', lower=-20, upper=20) # a simple uniform prior sigma = pm.Uniform('sigma', lower=0, upper=10) y_obs = pm.Normal('Y_obs', mu, sigma, observed=y) # we use the canned distributions in PYMC3 start = pm.find_MAP() step = pm.NUTS(scaling=start) trace = pm.sample(nsamples, step, start, random_seed=123, progressbar=True) lines = {var:trace[var].mean() for var in trace.varnames} pm.traceplot(trace, lines= lines) pm.df_summary(trace) # - # **traceplot** provides # - marginal posterior distribution for each parameter (left) # - the timeseries of the parameter values from the chain (right) # # Right hand side shows broad oscillation: both inlying and extreme values occur frequently but at irregular intervals indicating the markov sampler chain was 'well-mixed' (sampling evenly around the optimal position) # # ## Bayesian Statistical Inference # # As you have seen in the earlier lectures (and will likely see many times again) in Bayesian inference, we evaluate the **posterior probability** by using # ** data likelihood** and **prior** information: # # $$p(M,\theta \,|\,D,I) = \frac{p(D\,|\,M,\theta,I)\,p(M,\theta\,|\,I)}{p(D\,|\,I)},$$ # # The prior can be expanded as # $$p(M,\theta\,|\,I) = p(\theta\,|\,M,I)\,p(M\,|\,I).$$ # # It is often the case that the ** evidence** $p(D\,|\,I)$ is not evaluated explictly since the posterior probability # can be (re)normalized. 
# + [markdown] slideshow={"slide_type": "slide"} # **The Bayesian Statistical Inference process** is then # * formulate the likelihood, $p(D\,|\,M,\theta,I)$ # * choose a prior $p(M,\theta\,|\,I)$, which incorporates *other information beyond the data in $D$* # * determine the posterior pdf, $p(M,\theta \,|\,D,I)$ # * search for the model parameters that maximize $p(M,\theta \,|\,D,I)$ # * quantify the uncertainty of the model parameter estimates (credible region) # # + [markdown] slideshow={"slide_type": "slide"} # # ### Estimating $p(\theta)$ # # We want to estimate $p(\theta)$ given data, a likelihood, and priors. For the simplest (low-dimensional) case we could simply undertake a grid search to evaluate $p(\theta)$ for all $\theta$. As you might expect this becomes quite slow for high-dimensional data sets or for large samples. # # Once we have the posterior we wish to evaluate the multidimensional integral # # $$I(\theta) = \int g(\theta) p(\theta) \, d\theta$$ # + [markdown] slideshow={"slide_type": "slide"} # Three examples of this are: # # - Marginalization: if $P$ elements of $\theta$ are the model parameters, and the next $k-P$ parameters are nuisance parameters (often used to capture systematics within the data) we integrate over the space spanned by nuisance parameters $\theta_j$, $j=(P+1), \ldots, k$. For this case, $g(\theta)=1$. # - Estimating the posterior mean: $g(\theta)=\theta_m$ and the integral is performed over all other parameters # - Estimating credible regions (interval enclosing $1-\alpha$ of the posterior probability): we evaluate the integral up to an unknown normalization constant because the posterior pdf can be renormalized to integrate to unity.
# + [markdown] slideshow={"slide_type": "slide"} # ## Monte Carlo Methods # # Monte carlo methods have been used prior to computers including Buffon's needle (Comte de Buffon) that was proposed as a method for estimating $\pi$ # # <center><img src="figures/220px-Buffon_needle.png" alt="buffon" style="width:300px;"/></center> # # Probability a needle will cross a line # >$P = \frac{2l}{t\pi}$ # # The modern version was invented in the late 1940s by <NAME>, while he was working on nuclear weapons projects (studying the distance neutrons traveled through different materials) with von Neumann developing techniques for pseudorandom numbers. The name Monte Carlo was given to the method by <NAME> (inventor of the Metropolis sampler, which evolved into one of the most famous MCMC algorithms, the Metropolis-Hastings algorithm). # + [markdown] slideshow={"slide_type": "slide"} # Assume that you can generate a distribution of M random numbers $\theta_j$ uniformly sampled # within the integration volume V. Then our interval can be evaluated as # $$ I = \int g(\theta) \, p(\theta) \, d\theta = \frac{V}{M} \sum_{j=1}^M g(\theta_j) \, p(\theta_j).$$ # # Note that in 1-D we can write a similar expression # $$ I = \int f(\theta) \, d\theta = \Delta \, \sum_{j=1}^M f(\theta_j).$$ # # where $ f(\theta) = g(\theta) \, p(\theta) $, and it is assumed that the values # $\theta_j$ are sampled on a regular grid with the step $\Delta = V/M$ ($V$ here is the # length of the sampling domain). This expression is the simplest example of # numerical integration ("rectangle rule", which amounts to approximating $f(\theta)$ # by a piecewise constant function). # + [markdown] slideshow={"slide_type": "slide"} # The reason why we expressed $f(\theta)$ # as a product of $g(\theta)$ and $p(\theta)$ is that, as we will see shortly, # we can generate a sample drawn from $p(\theta)$ (instead of sampling on a # regular grid), and this greatly improves the performance of numerical integration. 
# # One of the simplest numerical integration methods is generic Monte Carlo. We generate a random set of M values $\theta$, uniformly sampled within the integration volume $V(\theta)$, and estimate the integral as $I \approx \frac{V}{M} \sum_{j=1}^M g(\theta_j) \, p(\theta_j)$. This method is very inefficient when the integrated function greatly varies within the integration volume, as is the case for the posterior pdf. This problem is especially acute with high-dimensional integrals. # + slideshow={"slide_type": "slide"} print ("fractional integration error for pi:") for M in [10, 100, 1000, 6000, 500000]: x = stats.uniform(0, 1).rvs(M) y = stats.uniform(0, 1).rvs(M) z = stats.uniform(0, 1).rvs(M) r2 = x*x+y*y r3 = x*x+y*y+z*z print ('M: ', M, np.abs((np.pi-4.0*np.size(x[r2<1])/M)/np.pi), np.abs((np.pi-6.*np.size(x[r3<1])/M))/np.pi) fig, ax = plt.subplots(subplot_kw=dict(aspect='equal')) plt.plot(x, y, '.k', markersize=3, c='blue') plt.plot(x[r2<1], y[r2<1], '.k', markersize=3, c='red') ax.axis([0, 1, 0, 1], aspect='equal'); plt.xlabel('x') plt.ylabel('y') plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ## Exercise: Extend this technique to >2 dimensions # # Estimate how many samples would be required to reach an estimate of $\pi$ accurate to 1% (for the adventurous, see Chapter 7 in "A Practical Python Guide for the Analysis of Survey Data" for the formula for an n-dimensional hypersphere) # # + [markdown] slideshow={"slide_type": "slide"} # ## Markov Chains # # A number of methods exist that are much more efficient than generic Monte Carlo integration. # The most popular group of techniques is known as Markov Chain Monte Carlo (MCMC). # # MCMC returns a sample of points, or **chain**, from the k-dimensional parameter space, with # a distribution that is **asymptotically proportional** to $p(\theta)$. The constant of # proportionality is not important in the first class of problems listed above.
In model # comparison problems, the proportionality constant must be known and we will return to this # point later. # # Given such a chain of length M, the integral can be estimated as # $$ I = \int g(\theta) \, p(\theta) \, d\theta = \frac{1}{M} \sum_{j=1}^M g(\theta_j).$$ # # Again, here **the values of $\theta$ are not sampled uniformly from the volume**; # they are sampled **proportionally** to $p(\theta)$! Note that there is no $p(\theta_j)$ # term next to $g(\theta_j)$ because the proper weighting in the sum is taken care of # by the sample itself! # + [markdown] slideshow={"slide_type": "slide"} # ### What is a Markov process or chain? # # A Markov process is defined as a sequence of random variables where a parameter depends # *only* on the preceding value. Such processes are "memoryless". # # Mathematically, we have # $$p(\theta_{i+1}|\{\theta_i\}) = p(\theta_{i+1}|\,\theta_i).$$ # # # Think of $\theta$ as a vector in multidimensional space, and a realization of the chain represents a path through this space. # # For equilibrium, or a stationary distribution of positions, it is necessary that the transition probability is symmetric: # $$ p(\theta_{i+1}|\,\theta_i) = p(\theta_i |\, \theta_{i+1}). $$ # # # This is called the detailed balance or reversibility condition (i.e. the probability of a jump between two points does not depend on the direction of the jump). # + [markdown] slideshow={"slide_type": "slide"} # The use of resulting chains to perform Monte Carlo integration is called *Markov Chain Monte Carlo* (MCMC). # # Given such a chain of length $M$, the integral can be estimated as # $ # I = {1 \over M} \sum_{j=1}^M g(\theta_j). # $ # As a simple example, to estimate the expectation value for $\theta_1$ (i.e., $g(\theta)=\theta_1$), # we simply take the mean value of all $\theta_1$ in the chain. # # Given a Markov chain, quantitative description of the posterior pdf # becomes a density estimation problem. 
To visualize the posterior pdf # for parameter $\theta_1$, marginalized over all other parameters, $\theta_2, \ldots, \theta_k$, # we can construct a histogram of all $\theta_1$ values in the chain, and normalize its integral # to 1. To get a MAP (maximum a posterori) estimate for $\theta_1$, we find the maximum of this marginalized pdf. # + [markdown] slideshow={"slide_type": "slide"} # In order for a Markov chain to reach a stationary distribution proportional to $p(\theta)$, # the probability of arriving at a point $\theta_{i+1}$ must be proportional to $p(\theta_{i+1})$, # $$ p(\theta_{i+1}) = \int T(\theta_{i+1}|\theta_i) \, p(\theta_i) \, d \theta_i, $$ # where the transition probability $T(\theta_{i+1}|\theta_i)$ is called the jump kernel or # transition kernel (and it is assumed that we know how to compute $p(\theta_i)$). # # This requirement will be satisfied when the transition probability satisfies the detailed # balance condition # $$ T(\theta_{i+1}|\theta_i) \, p(\theta_i) = T(\theta_i|\theta_{i+1}) \, p(\theta_{i+1}). $$ # # # + [markdown] slideshow={"slide_type": "slide"} # ## Markov Chain Monte Carlo # # How do we build the chain? Need to choose how far we will be allowed to move at each step. Need to decide whether we will accept the move (e.g. of $p(\theta_{i+1})$ is smaller) # # *Various MCMC algorithms differ in their choice of transition kernel* # # The most classic approach is the **The Metropolis-Hastings algorithm** # # **The Metropolis-Hastings algorithm** adopts acceptance probability # $$ p_{\rm acc}(\theta_i,\theta_{i+1}) = { p(\theta_{i+1}) \over p(\theta_i) }, $$ # where the proposed point $\theta_{i+1}$ is drawn from an *arbitrary* symmetric density distribution $K(\theta_{i+1}\,|\,\theta_i)$. A Gaussian distribution centered on # $\theta_i$ is often used for $K(\theta_{i+1}|\theta_i)$. # + [markdown] slideshow={"slide_type": "slide"} # ** When $\theta_{i+1}$ is rejected, $\theta_i$ is added to the chain instead. 
** # # The original Metropolis algorithm is based on a symmetric proposal distribution, # $K(\theta_{i+1}|\theta_i) = K(\theta_i|\theta_{i+1})$, which then cancels out from # the acceptance probability. **In this case, $\theta_{i+1}$ is always accepted if # $p(\theta_{i+1}) > p(\theta_i)$, and if not, then it is accepted with a probability # $p(\theta_{i+1})/p(\theta_i)$.** # # **The key aspect** is that # # $\frac{p(M,\theta_{i+1} \,|\,D,I)}{p(M,\theta_{i} \,|\,D,I)} = \frac{\frac{p(D\,|\,M,\theta_{i+1},I)\,p(M,\theta_{i+1}\,|\,I)}{p(D\,|\,I)}}{\frac{p(D\,|\,M,\theta_i,I)\,p(M,\theta_i\,|\,I)}{p(D\,|\,I)}} = \frac{p(D\,|\,M,\theta_{i+1},I)\,p(M,\theta_{i+1}\,|\,I)}{p(D\,|\,M,\theta_i,I)\,p(M,\theta_i\,|\,I)} # $ # # so there is no dependence on the evidence. # # This algorithm guarantees that the chain will reach an equilibrium, or stationary, distribution, and it will approximate a sample drawn from $p(\theta)$! # + [markdown] slideshow={"slide_type": "slide"} # ## Markov Chain Monte Carlo # # **In summary, the Metropolis-Hastings algorithm consists of these steps:** # # 1) given $\theta_i$ and $K(\theta_{i+1}|\theta_i)$, draw a proposed value for $\theta_{i+1}.$ # # 2) compute acceptance probability $p_{\rm acc}(\theta_i,\theta_{i+1})$. # # 3) draw a random number between 0 and 1 from a uniform distribution; if it is smaller than # $p_{\rm acc}(\theta_i,\theta_{i+1})$, then accept $\theta_{i+1}$. # # 4) if $\theta_{i+1}$ is accepted, add it to the chain; if not, add $\theta_{i}$ to the chain. # # 5) use the chain (of $\theta$ values) for inference; e.g. a histogram of $\theta$ is # an estimator of the posterior pdf for $\theta$, $p(\theta)$, and the expectation value for # $\theta$ can be computed from # $$ I = \int g(\theta) \, p(\theta) \, d\theta = \frac{1}{M} \sum_{j=1}^M \theta_j.$$ # # where M is the number of elements in the chain (e.g. # the expectation value for $\theta$ is simply the mean value of chain elements).
# # # + [markdown] slideshow={"slide_type": "slide"} # ## Caveats # # Although $K(\theta_{i+1}|\theta_i)$ satisfies a Markov chain requirement that it # must be a function of only the current position $\theta_i$, it takes a number # of steps to reach a stationary distribution from an initial arbitrary position $\theta_0$. # **These early steps are called the "burn-in" and need to be discarded in analysis.** # There is no general theory for finding transition from the burn-in phase to # the stationary phase; several methods are used in practice. Gelman and Rubin # proposed to generate a number of chains and then compare the ratio of # the variance between the chains to the mean variance within the chains (this # ratio is known as the $R$ statistic). For stationary chains, this ratio will # be close to 1. # # When the posterior pdf is multimodal, the simple Metropolis--Hastings algorithm can # become stuck in a local mode and not find the globally best mode within a reasonable # running time. There are a number of better algorithms, such as Gibbs sampling, parallel # tempering, various genetic algorithms, and nested sampling. # + [markdown] slideshow={"slide_type": "slide"} # ### How do we choose the next step # # How far should we step (small steps in parameter space or large). This impacts the efficiency of the process but not if we will reach equilibrium. We want our samples to be independent of one another which we can determine with the autocorrelation function - we can then adjust the step size to account for this (e.g. ensure that we reject 50% of the proposed steps) # # Other techniques: **NUTS (No-U-Turn Sampler)**, Metropolis, Slice, HamiltonianMC, and BinaryMetropolis # # [NUTS](http://arxiv.org/abs/1507.08050) is generally the sampler of choice for continuous parameters (it uses the gradient of the log posterior-density). 
See the end of this notebook for animations of different strategies # # + slideshow={"slide_type": "slide"} from IPython.display import HTML HTML('<iframe src="https://player.vimeo.com/video/19274900" width="640" height="480" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe><p><a href="https://vimeo.com/19274900">Metropolis in the Square</a> from <a href="https://vimeo.com/user3812935"><NAME></a> on <a href="https://vimeo.com">Vimeo</a>.</p>') # + slideshow={"slide_type": "slide"} HTML('<iframe src="https://player.vimeo.com/video/19274173" width="640" height="480" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe><p><a href="https://vimeo.com/19274173">Metropolis in Diagonal Region</a> from <a href="https://vimeo.com/user3812935"><NAME></a> on <a href="https://vimeo.com">Vimeo</a>.</p>') # -
lectures/Week-4-Thu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""
Starting in the top left corner of a 2×2 grid, and only being able
to move to the right and down, there are exactly 6 routes to the bottom
right corner.

How many such routes are there through a 20×20 grid?
"""

# +
from scipy.special import comb

# A route through an n-by-n grid is a sequence of 2n moves of which exactly n
# are "right" (the remaining n are "down"), so the number of routes is the
# binomial coefficient C(2n, n).  For n = 20 that is C(40, 20).
# Use exact=True so SciPy computes the exact integer answer rather than a
# floating-point approximation (floats lose precision for large binomials).
routes = comb(40, 20, exact=True)
routes
Project_Euler-Problem_15.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # this program says hello and asks for the name print('Hello world!') print('What is your name?') myName = input() print('It is good to meet you, ' + myName) print('The length of your name is:') print(len(myName)) # + # this program says hello and asks for the name print('Hello world!') print('What is your name?') myName = input() print('It is good to meet you, ' + myName) print('The length of your name is:') print(len(myName)) # + # this program says hello and asks for the name print('Hello world!') print('What is your name?') myName = input() print('It is good to meet you, ' + myName) print('The length of your name is:') print(len(myName)) print('What is your age?') myAge=input() print('You will be ' + str(int(myAge)+1) + ' in a year') # - int('3') spam=True spam True=2+2 if 2+2==4: print('2+2 is indeed 4') else: print('that\'s not right') while True: print('Who are you?') name = input() if name != 'Joe': continue print('Hello, Joe. 
What is your password?') password = input() if password == '<PASSWORD>': break print('Access granted.') spam = int(input()) if spam==1: print('Hello') elif spam==2: print('Howdy') else: print('Greetings!') round(3.14159) round(3.90) # + import random def getAnswer(answerNumber): if answerNumber == 1: return 'It is certain' elif answerNumber ==2: return 'It is decidely so' elif answerNumber ==3: return 'Yes' elif answerNumber ==4: return 'Reply hazy try again' elif answerNumber ==5: return 'Ask again later' r=random.randint(1,6) fortune=getAnswer(r) print(fortune) # + active="" # # - print('Hello', 'World', sep='@') print('Hello', end='') print('World') # + def spam(): eggs = 'spam local' print(eggs) def bacon(): eggs = 'bacon local' print(eggs) spam() print(eggs) eggs = 'global' bacon() print(eggs) # + def spam(): global eggs eggs = 'spam' eggs = 'global' spam() print(eggs) # + # This is a guess the number game import random secretNumber = random.randint(1, 20) print('I\'m thinking of a number between 1 and 20') # Ask the player to guess 6 times for guessTaken in range(1,7): print('Take a guess') guess = int(input()) if guess < secretNumber: print('Your guess is too low') elif guess > secretNumber: print('Your guess is too high') else: break if guess == secretNumber: print('Good. The number was ' + str(secretNumber)) else: print('Nope. 
The number was ' + str(secretNumber)) # - catNames = [] while True: print('Enter the name or cat'+str(len(catNames)+1) + ' (Or enter nothing to stop.): ') name = input() if name == '': break catNames = catNames + [name] #list concatenation print('The cat names are: ') for name in catNames: print(' '+name) # + import random messages = ['It is certain', 'It is decidedly so', 'My reply is no', 'Very doubtful'] print(messages[random.randint(0, len(messages) - 1)]) # + # birthdays.py birthdays = {'Alice':'Apr 1', 'Bob':'Dec 12', 'Carol':'Mar 4'} while True: print('Enter a name (blank to quit):') name = input() if name == '': break if name in birthdays: print(birthdays[name] + ' is the birthday of ' + name) else: print('I do not have birthday information for ' + name) print('What is their birthday?') bday = input() birthdays[name] = bday print('Birthday database was updated.') # - spam = {'color':'red', 'age':42} for v in spam.values(): print(v) spam = {'color':'red', 'age':42} for v in spam.keys(): print(v) spam = {'color':'red', 'age':42} for v in spam.items(): print(v) spam = {'color':'red', 'age':42} list(spam.keys()) spam = {'color':'red', 'age':42} for k,v in spam.items(): print('Key: '+k+' Value: '+str(v)) spam = {'name':'Zophie','age':7} 'name' in spam.keys() picnicItems={'apples':5, 'cups':2} print('I am bringing '+str(picnicItems.get('cups',0))+' cups.') print('I am bringing '+str(picnicItems.get('eggs',0))+' eggs.') spam = {'name':'Pooka','age':5} spam.setdefault('color','black') spam.setdefault('color','white') # charactercount.py with pretty print import pprint message = 'It was a bright cold day in April, and the clocks were striking thirteen.' 
count = {} for character in message: count.setdefault(character, 0) count[character] = count[character] + 1 pprint.pprint(count) # + # Fantasy game inventory stuff = {'torch':6, 'rope':1, 'gold coin':42, 'dagger':1, 'arrow':12} def displayInventory(inventory): countInv = 0 for i, j in inventory.items(): print(str(j) + ' ' + i) countInv += 1 print('Total unique items: ' + str(countInv)) displayInventory(stuff) # + # Fantasy game inventory def displayInventory(inventory): countInv = 0 for i, j in inventory.items(): print(str(j) + ' ' + i) countInv += 1 print('Total unique items: ' + str(countInv)) def addToInventory(inventory, addItems): for i in addItems: if i in inventory: inventory[i] += 1 else: inventory[i] = 1 return inventory dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby'] inv = {'gold coin': 42, 'rope':1} inv = addToInventory(inv, dragonLoot) displayInventory(inv) # + #validateInput.py while True: print('Enter your age:') age = input() if age.isdecimal(): break print('Only enter a number for age.') while True: print('Select a new password(letters and numbers):') password = input() if password.isalnum(): break print('Passwords can only have letters and numbers.') # - ', '.join(['cats','rats','bats']) 'My name is Simon'.split() 'Hello'.center(20,'=') # + #picnicTable.py def printPicnic(itemsDict, leftWidth, rightWidth): print('PICNIC ITEMS'.center(leftWidth+rightWidth,'-')) for k, v in itemsDict.items(): print(k.ljust(leftWidth, '.')+str(v).rjust(rightWidth, '.')) picnicItems = {'sandwiches':4, 'apples':12, 'cups':4, 'cookies':8000} printPicnic(picnicItems, 12, 5) # - spam = ' Hello world ' spam = spam.strip() spam import pyperclip pyperclip.copy('Hello World') pyperclip.paste() # regex matching import re phoneNumRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d') # raw string passed to prevent escapes being caught mo = phoneNumRegex.search('My number is 415-555-4242.') print('Phone number found: ' + mo.group()) phoneNumRegex = 
re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)') mo = phoneNumRegex.search('My number is 415-555-4242.') mo.group(1) mo.groups() areaCode, mainNumber = mo.groups() print(areaCode) print(mainNumber) heroRegex = re.compile(r'Batman|Tina Fey') mo1 = heroRegex.search('Batman and Tiny Fey') mo1.group() batRegex = re.compile(r'Bat(man|mobile)') mo = batRegex.search('Batmobile lost a wheel.') mo.group() mo.groups() import re # optional regex matching batRegex=re.compile(r'Bat(wo)?man') mo1 = batRegex.search('The adventures of Batwoman') mo1.group() # nongreedy regex matching nonGreedy = re.compile(r'(Ha){3,5}?') mo1 = nonGreedy.search('HaHaHaHaHa') mo1.group() phoneNumRegex = re.compile(r'\d{3}-\d{3}-\d{4}') mo = phoneNumRegex.findall('Cell: 415-555-9999 Home: 212-555-0000') mo endsWithNumber=re.compile(r'\d$') endsWithNumber.search('Your number is 42') # another nongreedy nonGreedyReg = re.compile(r'<.*?>') mo = nonGreedyReg.search('<to serve man> for dinner.>') mo.group() robocop = re.compile(r'robocop', re.I) # pass re.I to Ignore Case regex robocop.search('ROBOCOP protects the innocent.').group() # sub() function namesRegex=re.compile(r'Agent \w+') namesRegex.sub('CENSORED', 'Agent Alice gave secret docs to Agent Bob') # + # multiple compile params require bitwise operator # re.compile(r'foo', re.IGNORECASE | re.DOTALL | re.VERBOSE) # -
AutomateBoringStuffWithPython.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python part 3 # # ## Objects (*things*) and functions # So far in Python we've come across things that exist such as: # - a variable with a value: `x = 1` # - a string: `s = "hello"` # - a list: `a = [1,2,3]` # These are examples of ***objects***: they are all separate instances of things in the computer's memory. They just exist and don't do anything on their own. # # We have also come across functions that ***do something***: # - `print()` # - `help()` # - `range()` # # In addition to these standalone functions, all the objects in Python come with their own ***built-in functions***. We use call these functions by following the name of the object by a dot and the name of the function. For example, all lists have a `clear` function that clears the list: a = [1,2,3] a.clear() a # As another example, all strings have a `capitalize` function: s = "hello" print(s.capitalize()) # Functions that belong to an object are properly called ***methods***. Every object has methods. Objects can also have variables that belong to them called ***member variables***, which we will come to later. # # Let's look again at list objects in Python and see what methods they have. # ## Data structures ## # ### <font color = "blue"> Lists </font> ### # We met lists in part one. We compared a string - which is a collection of characters - and a list - which is a collection of almost any object (even a mixture of objects). Let's look at some of the methods that are available to manipulate lists, by starting with a simple list and manipulating it. l = [1,2,3,4,5] # start with a simple list of ints # The `append` method appends an item... 
l.append(6) l # ...while `pop` removes one l.pop() l # `insert(i,x)` inserts object x at position i l.insert(1, 1.5) l # Clear the list with `clear` l.clear() l # Use `index` to find the index of the first occurence of an item l = ["h", "e", "l", "l", "o"] l.index("e") # and finally use `count` to count items: l.count("l") # There are lots more, but these are the most commonly used ones. # ### <span class="girk">Ex 3.1</span> ### # The list `records` consists of personal records. Each record is simply a list with two entries: a name and a year of birth: records = [ [ "<NAME>" , 1960 ] , [ "<NAME>" , 1941 ], [ "<NAME>" , 1947 ], [ "<NAME>" , 1942 ], [ "<NAME>" , 1957 ], [ "<NAME>" , 1947 ], [ "<NAME>" , 1942 ], [ "<NAME>" , 1961 ], [ "<NAME>" , 1955 ], [ "<NAME>" , 1940 ], ] # + [markdown] solution2="hidden" solution2_first=true # Write some code that will produce a second list that contains only those people born after 1950. Do this by: # - creating an empty list; # - writing a loop over each record in `records`; # - checking the record, and if appropriate using the `append` method to add it to your new list. # + solution2="hidden" post_1950 = [] for record in records: if record[1] > 1950: post_1950.append(record) post_1950 # - # ## Strings are immutable... ## # Remember using slices with strings? s = "hello there" print(s[0:5]) # We can do the same with a list: nums = [0, 1, 2, 3, 4, 5] print(nums[0:3]) # first three elements print(nums[-2:]) # last two elements # We said that strings are **immutable**; they can't be altered, so you can't do # + #s[3] = u # you get an error if you do this # - # ## ... 
but lists *are* mutable # You can reassign individual items nums[3] = 8 nums # or even slices: nums[:3] = [0, -1, -2] # reassign first three items nums # When reassigning slices like this, the sizes don't need to match: nums[:] = [1] # replace the whole list with a list of length 1 nums # ### <span class="girk">Ex 3.2</span> ### # Given what you known of slicing, what does the following do? # + solution2="hidden" solution2_first=true nums = [1, 2, 3, 4, 5] nums[:] = list[::-1] # + solution2="hidden" list # - # ### Clearing bits of strings ### # We have used slices to select part of a list. Now we can **delete** part of a list by taking a **slice** and assigning it to an empty list, `[]`. # # Clear the first two items: nums = [1,2,3,4,5] nums[0:2] = [] nums # clear the whole list: nums[:] = [] nums # clear a single item nums = [1,2,3] nums[0:1] = [] nums # Notice that we have to select a slice to replace, rather than an item. This doesn't work: nums[0] = [] nums # as we end up with a list within a list. # ## Tuples ## # Tuples are very like lists. A tuple (usually pronouced "tyou-pell" but sometimes "tupple") is a collection that # - may contain different object types # - is ordered # - is ***immutable*** - this is probably the key difference. # # We define a tuple like a list, but without the square brackets: tuple = 1, 2, "a", 2.4 tuple # A tuple of length one is defined using a trailing comma: notatuple = 0 isatuple = 0, type(isatuple) # check it's a tuple # Sometimes round brackets are used to enclose a tuple, though they're not necessary. # # A nice use of tuples is to assign multiple variables at once: x, y, z = 1, 2, 3 print(x, y, z) # This statement really containts two tuples - `x, y, z` and `1, 2, 3` - and we are able to assign individual elements separately. 
# # You can access elements in a tuple as you would a list: tuple[0] # + [markdown] solution2="shown" solution2_first=true # ### <span class="girk">Ex 3.3</span> ### # Create a tuple containing a string, an int, a float and a list. # + solution2="shown" tuple2 = ("hello", 7, 3.4, [1, 2] ) # - # ## Sets ## # Sets are collections that # - are unordered # - have no duplicate items # # Define a set using brace brackets: set = {"h", "e", "l", "l", "o"} set # Notice: # - the set doesn't let us have duplicates (the second l was removed) # - the order is disregarded # Check the presence of an item using `in`: "h" in set # Add an item to the set using `add`... set.add("z") set # ...and remove one using `remove` set.remove("e") set # Some methods are similar to those for lists, such as `clear` set.clear() set # ## Dictionaries ## # The list is a data structure that allows you to access a value - an item in the list - using an index, i.e. its position: students = ["Alice", "Chris", "Pavel", "Pablo"] students[1] # A dictionary allows you to access a ***value*** using a ***key*** rather than an index. Let's use a dictionary of students to store their grades: # - the ***key*** is their **name** # - the ***value*** is their **grade** students = {"Alice": 13, "Chris": 11, "Pavel": 19, "Pablo": 4, "Martin" : 13} students # Each item in the dictionary is defined using **key**:**value**, and the whole thing is in brace brackets. 
#
# A dictionary:
# - has no duplicate *keys* (just like a paper dictionary has no duplicate words)
# - can have identical *items* (as long as they have a different key): Alice and Martin have the same grade as we saw
# - is unordered - we don't care about the position of items
#
# Access an item using its key:

students["Alice"]

# Add an item

students["Aziz"] = 12
students

# Remove an item using `pop`

students.pop("Aziz")
students

# Check for a key using `in` (in the same way as we did with a set):

"Alice" in students

# ### <span class="girk">Ex 3.4</span> ###
# An encrypted message has been discovered together with part of a codebook describing the simple cipher used to encrypt it. For each entry in the dictionary below, the key is a letter in the secret message, and the value is a letter in the plaintext original message:

# +
message = "shp#hars#rtdw#oitlw#halw#izwa#shp#xmw#yhma#xak#halw#izwa#shp#rhhj#kwxoz#ta#ozw#bxlw"

# simple substitution cipher: ciphertext character -> plaintext character
# ('#' encodes a space)
codebook = { # code : original
    'y': 'b', 'l': 'c', 'k': 'd', 'b': 'f', 'f': 'g', 'z': 'h',
    't': 'i', 'n': 'j', 'j': 'k', 'r': 'l', 'q': 'm', 'a': 'n',
    'h': 'o', 'v': 'p', 'g': 'q', 'm': 'r', 'u': 's', 'p': 'u',
    'd': 'v', 'i': 'w', 'c': 'x', 's': 'y', 'e': 'z', '#': ' '
}

# + [markdown] solution2="hidden" solution2_first=true
# However, the last three crucial entries are missing. These have been discovered to be
# - `'w': 'e'`
# - `'o': 't'`
# - `'x': 'a'`
#
# - Add these entries to `codebook` like we did with the entry "Alice".
# - Write a loop that takes each letter of `message` and uses `codebook` to look up the unencrypted letter, to decipher the message.
# -

# ## Functions `list`, `tuple`, and `set` ##
# We have seen how to define collections - lists tuples and sets - already:

my_list = [1, 1, 2, 3]
my_tuple = (1, 1, 2, 3)
my_dict = {"a" : 1 , "b" : 1 , "c" : 2, "d" : 3}

# The functions `list`, `tuple` and `set` allow us to define collections another way, by turning one sort into another. For example, let's turn a tuple into a list:

new_list = list(my_tuple)
new_list

# or vice-versa:

new_tuple = tuple(my_list)
new_tuple

# Turn a list into a set (and notice the duplicate entry is removed)

new_set = set(my_list)
new_set

# These functions will work with other iterable objects (i.e. things you can iterate over). Let's try with a string (which is just a collection of characters after all):

# +
agent = "saunders"

string_list = list(agent)
print(string_list)

string_tuple = tuple(agent)
print(string_tuple)

string_set = set(agent)
print(string_set)

# + [markdown] solution2="hidden" solution2_first=true
# ### <span class="girk">Ex 3.5</span> ###
# Given a string `words`, write code that creates a list of the unique characters in that string.
#
# For an extra challenge, create a dictionary where every key is a letter in the string, and each value is the number of occurrences of that letter. E.g. the word "salsas" would produce the dictionary `{"s" : 3, "a" : 2, "l" : 1}`. *Hint:* the `count` function counts the number of occurrences in a string.

# + solution2="hidden"
words = "natalia simonava"

# a set keeps exactly one copy of each character; `list` turns it back into a list
letter_list = list(set(words))
print(letter_list)

letter_dict = {}
for letter in letter_list:
    # `str.count` tallies how many times `letter` appears in `words`
    letter_dict[letter] = words.count(letter)
letter_dict
# -

# ## Comprehensions ##
# As stated in the introduction, the intention with these tutorials is to explain the basic elements of Python to allow you to write code quickly, without having to invest in learning some of the more advanced language features. Comprehensions are one such advanced feature that we *are* going to consider, because:
# - they provide an elegant syntax which will spare you a lot of typing when solving common problems;
# - you will see them all the time in Python code examples, so it makes sense to understand them.
#
# Here is the motivation: an extremely common task is that of creating a list programmatically - let's take a list of fractions as an example:
#
# $\left[\frac{1}{1}, \frac{1}{2}, \frac{1}{3}, \cdots, \frac{1}{10}\right]$.
#
# Using our knowledge we could code this like so:
#

fractions = []
for i in range(1, 11):
    fractions.append(1 / i)
fractions

# The code above takes an empty list, and uses a range of values. For each value in the range, it calculates something (a fraction in this case) and adds it to the list. This task is so common that Python provides a neat syntax for it, called a ***list comprehension***:

fractions = [1 / x for x in range(1, 11)]
fractions

# Here is another example: create a list of all the individual letters in a string:

letters = [l for l in "i never joke about my work 007"]
letters

# We see that a list comprehension has the form:
#
# [*do something* with *VARIABLE* `for` VARIABLE `in` *OBJECT you can iterate over, e.g. list, range, string*].
#
# Here is another example that takes a list of words and produces a list of their lengths:

quote = ["Pistols", "at", "dawn", "it's", "a", "little", "old", "fashioned", "isn't", "it"]

[len(word) for word in quote]

# Suppose now we want to count word lengths again, but we want to ignore any instances of the words "a" and "the". Fortunately, Python allows you to add an if statement at the end of a list comprehension that will determine whether or not the element is included in the new list:

quote = ["a", "cat", "sat", "on", "the", "mat"]

[len(word) for word in quote if word != "a" and word != "the"]

# + [markdown] solution2="hidden" solution2_first=true
# ### <span class="girk">Ex 3.6</span> ###
# Write a list comprehension that produces a list of the squares $x ^ 2$ for $x$ in $1, 2, 3, \cdots, 100$, but only if $x ^ 2$ is a multiple of 3 and 5.
# + solution2="hidden"
squares = [x ** 2 for x in range(1, 101) if (x ** 2) % 3 == 0 and (x ** 2) % 5 == 0]
squares
# -

# ## Assigning variables and variable binding ##
# Remember that we can assign variables to other variables, so we can create a variable `x` and then assign its value to `y`:

x = 1
y = x

# If we alter one, the other stays unchanged:

x += 1 # increment x: check y remains unchanged
print(x, y)

# If you try the same thing with a list, something strange happens:

list_x = [1, 2, 3]
list_y = list_x
list_y.clear()
print(list_x, list_y)

# Clearing `list_y` has had an effect on `list_x`! In fact the same will happen for any object that is mutable, e.g. a set:

set_x = {1, 2, 3}
set_y = set_x
set_y.remove(1)
print(set_x, set_y)

# This is because of the way assignment works in Python.
#
# What we actually are doing when entering `x = 1` is creating an association between the name `x` and the object `1`: we say that the object `1` is bound to `x`. Now when we do `y = x`, the variable `y` is also bound to whatever `x` is bound to (`1`). We ***do not copy*** the value of `x` into `y`.
#
# When we increment `x` with `x += 1`, we don't alter the object that `x` is bound to (it's a number which is immutable); rather, `x` is reassigned to the object `2`. The picture below explains this.

# Every object in Python has a unique ID. By checking this ID with the `id` function, we can show that when we perform assignment, the two variables refer to the same object, rather than a copy. Let's do this with strings:

x = "abc"
y = x  # bind y to the same string object (was `x = y`, which rebound x to the old int and broke the demo)
print(id(x), id(y))

# Now it's clear what happened with the lists: `list_x` and `list_y` actually both refer to the same object; we can check:

print(id(list_x), id(list_y))

# Since lists are mutable, any changes to `list_x` or `list_y` change a single underlying object. The picture below explains this.
# So we see that:
#
# ***assigning variables simply means binding them to an object; assigning one variable to another means that both will refer to the same underlying object.***

# ## Altering things inside a function ##
# We have just seen that if we perform an assignment of the form `x = y` then:
# - if `x` and `y` are mutable objects, any change in `x` is reflected in `y`;
# - if `x` and `y` are numbers, then alterations to `x` don't affect `y`.
#
# Similarly, we can think about how variables are changed when they are passed in to a function. The following function takes a value `number` and a list `lst` and alters each one:

def alter(number, lst):
    """Increment `number` (invisible to the caller: ints are immutable)
    and append 1 to `lst` (visible to the caller: lists are mutable).

    The second parameter is named `lst` rather than `list` so that the
    built-in `list` type is not shadowed inside the function body.
    """
    number += 1 # add one to number
    lst.append(1) # append a 1 to lst

# Let's define some variables, and see how they look before and after passing them to our function:

# +
my_number = 0
my_list = []

print("before function")
print (my_number, my_list)

alter(my_number, my_list)

print("after function")
print(my_number, my_list)
# -

# Just like earlier when we investigated assigning variables to each other, the number is left unchanged, but the list is altered.
#
# It's quite useful being able to pass an argument to a function and have the function alter it *in place*, rather than having to return a value. As an example, here is a function to swap a list containing two items:

# +
def swap(l):
    """Swap the first two items of list `l` in place."""
    l[0], l[1] = l[1], l[0]

# testing
l = [1,2]
swap(l)
l
part_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Calculating Containment # # In this notebook, you'll implement a containment function that looks at a source and answer text and returns a *normalized* value that represents the similarity between those two texts based on their n-gram intersection. import numpy as np import sklearn # ### N-gram counts # # One of the first things you'll need to do is to count up the occurrences of n-grams in your text data. To convert a set of text data into a matrix of counts, you can use a [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html). # # Below, you can set a value for n and use a CountVectorizer is used to count up the n-gram occurrences. In the next cell, we'll see that the CountVectorizer constructs a vocabulary, and later, we'll look at the matrix of counts. # + from sklearn.feature_extraction.text import CountVectorizer a_text = "This is an answer text" s_text = "This is a source text" # set n n = 1 # instantiate an ngram counter counts = CountVectorizer(analyzer='word', ngram_range=(n,n)) # create a dictionary of n-grams by calling `.fit` vocab2int = counts.fit([a_text, s_text]).vocabulary_ # print dictionary of words:index print(vocab2int) # - # ### EXERCISE: Create a vocabulary for 2-grams (aka "bigrams") # # Create a `CountVectorizer`, `counts_2grams`, and fit it to our text data. Print out the resultant vocabulary. # + # create a vocabulary for 2-grams n = 2 counts_2grams = CountVectorizer(analyzer='word', ngram_range=(n,n), token_pattern = r"(?u)\b\w+\b") # create a dictionary of n-grams by calling `.fit` vocab2int_2grams = counts_2grams.fit([a_text, s_text]).vocabulary_ # print dictionary of words:index print(vocab2int_2grams) # - # ### What makes up a word? 
# # You'll note that the word "a" does not appear in the vocabulary. And also that the words have been converted to lowercase. When `CountVectorizer` is passed `analyzer='word'` it defines a word as *two or more* characters and so it ignores uni-character words. In a lot of text analysis, single characters are often irrelevant to the meaning of a passage, so leaving them out of a vocabulary is often desired behavior. # # For our purposes, this default behavior will work well; we don't need uni-character words to determine cases of plagiarism, but you may still want to experiment with uni-character counts. # # > If you *do* want to include single characters as words, you can choose to do so by adding one more argument when creating the `CountVectorizer`; pass in the definition of a token, `token_pattern = r"(?u)\b\w+\b"`. # # This regular expression defines a word as one or more characters. If you want to learn more about this vectorizer, I suggest reading through the [source code](https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L664), which is well documented. # # **Next, let's fit our `CountVectorizer` to all of our text data to make an array of n-gram counts!** # # The below code, assumes that `counts` is our `CountVectorizer` for the n-gram size we are interested in. # + # create array of n-gram counts for the answer and source text ngrams = counts.fit_transform([a_text, s_text]) # row = the 2 texts and column = indexed vocab terms (as mapped above) # ex. column 0 = 'an', col 1 = 'answer'.. col 4 = 'text' ngram_array = ngrams.toarray() print(ngram_array) # - # So, the top row indicates the n-gram counts for the answer text `a_text`, and the second row indicates those for the source text `s_text`. If they have n-grams in common, you can see this by looking at the column values. For example they both have one "is" (column 2) and "text" (column 4) and "this" (column 5). 
#
# ```
# [[1 1 1 0 1 1] = an answer [is] ______ [text] [this]
# [0 0 1 1 1 1]] = __ ______ [is] source [text] [this]
# ```

# ### EXERCISE: Calculate containment values
#
# Assume your function takes in an `ngram_array` just like that generated above, for an answer text (row 0) and a source text (row 1). Using just this information, calculate the containment between the two texts. As before, it's okay to ignore the uni-character words.
#
# To calculate the containment:
# 1. Calculate the n-gram **intersection** between the answer and source text.
# 2. Add up the number of common terms.
# 3. Normalize by dividing the value in step 2 by the number of n-grams in the answer text.
#
# The complete equation is:
#
# $$ \frac{\sum{count(\text{ngram}_{A}) \cap count(\text{ngram}_{S})}}{\sum{count(\text{ngram}_{A})}} $$

def containment(ngram_array):
    '''Containment is a measure of text similarity: the normalized
    intersection of n-gram word counts in two texts.

    :param ngram_array: a 2-row array of n-gram counts, where row 0 holds
        the counts for the answer text and row 1 the counts for the source
        text (one column per vocabulary n-gram).
    :return: a normalized containment value; 1.0 means every n-gram of the
        answer also appears (at least as often) in the source.'''

    # element-wise minimum of the two count rows = size of the n-gram intersection
    intersection_count = np.minimum(ngram_array[0], ngram_array[1]).sum()

    # total number of n-grams in the answer text (the normalizer)
    answer_count = ngram_array[0].sum()

    return intersection_count / answer_count

# +
# test out your code
containment_val = containment(ngrams.toarray())

print('Containment: ', containment_val)

# note that for the given texts, and n = 1
# the containment value should be 3/5 or 0.6
assert containment_val==0.6, 'Unexpected containment value for n=1.'
print('Test passed!') # + # test for n = 2 counts_2grams = CountVectorizer(analyzer='word', ngram_range=(2,2)) bigram_counts = counts_2grams.fit_transform([a_text, s_text]) # calculate containment containment_val = containment(bigram_counts.toarray()) print('Containment for n=2 : ', containment_val) # the containment value should be 1/4 or 0.25 assert containment_val==0.25, 'Unexpected containment value for n=2.' print('Test passed!') # - # I recommend trying out different phrases, and different values of n. What happens if you count for uni-character words? What if you make the sentences much larger? # # I find that the best way to understand a new concept is to think about how it might be applied in a variety of different ways.
Containment_Exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # **Traffic Sign Recognition** # # ## Writeup # # --- # # **Build a Traffic Sign Recognition Project** # # The goals / steps of this project are the following: # * Load the data set (see below for links to the project data set) # * Explore, summarize and visualize the data set # * Design, train and test a model architecture # * Use the model to make predictions on new images # * Analyze the softmax probabilities of the new images # * Summarize the results with a written report # # # [//]: # (Image References) # # [image1]: ./Write_up_images/visualisation_plot.png "Visualization" # [image2]: ./Write_up_images/train_histogram.png "Training Set Histogram" # [image3]: ./Write_up_images/valid_histogram.png "Validation Set Histogram" # [image4]: ./Write_up_images/Grayscaling.png "Grayscaling Before and After" # [image5]: ./Write_up_images/Translating.png "Translating" # [image6]: ./Write_up_images/Zoom.png "Zooming" # [image7]: ./Write_up_images/Rotate.png "Rotate" # [image8]: ./Write_up_images/tilt.png "Tilt" # [image9]: ./Write_up_images/new_histogram_train.png "Train" # [image10]: ./Write_up_images/new_histogram_valid.png "Valid" # [image11]: ./Internet_Images/30kmhSign.jpeg "30km" # [image12]: ./Internet_Images/BicycleCrossing.jpeg "Bicycle" # [image13]: ./Internet_Images/ChildrenCrossing.jpeg "Children" # [image14]: ./Internet_Images/RoadWorks.jpeg "Roadworks" # [image15]: ./Internet_Images/StopSign.jpeg "Stop Sign" # [image16]: ./Write_up_images/Image_Predictions.png "Softmax Predictions" # # # ## Rubric Points # ### Here I will consider the [rubric points](https://review.udacity.com/#!/rubrics/481/view) individually and describe how I addressed each point in my implementation. # # --- # ### Writeup / README # # #### 1. 
Provide a Writeup / README that includes all the rubric points and how you addressed each one. You can submit your writeup as markdown or pdf. You can use this template as a guide for writing the report. The submission includes the project code. # # You're reading it! and here is a link to my [project code](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/Traffic_Sign_Classifier.ipynb) # # ### Data Set Summary & Exploration # # #### 1. Provide a basic summary of the data set. In the code, the analysis should be done using python, numpy and/or pandas methods rather than hardcoding results manually. # # I used the numpy library (shape function) to calculate summary statistics of the traffic data set. # Data set summary: # # * The size of training set is **34799** # * The size of the validation set is **4410** # * The size of test set is **12630** # * The shape of a traffic sign image is **32x32x3** # * The number of unique classes/labels in the data set is **43** # # #### 2. Include an exploratory visualization of the dataset. # I plotted a random image of each class using matplotlib functions. I've also plotted a histogram of the training and validation set. The results are shown below. # # ![alt text][image1] # ![alt text][image2] # ![alt text][image3] # # # ### Design and Test a Model Architecture # # #### 1. Describe how you preprocessed the image data. What techniques were chosen and why did you choose these techniques? Consider including images showing the output of each preprocessing technique. Pre-processing refers to techniques such as converting to grayscale, normalization, etc. (OPTIONAL: As described in the "Stand Out Suggestions" part of the rubric, if you generated additional data for training, describe why you decided to generate additional data, how you generated the data, and provide example images of the additional data. 
Then describe the characteristics of the augmented training set like number of images in the set, number of images for each class, etc.)
# ##### Grayscaling
# As a first step, I decided to convert the images to grayscale because it reduces the complexity of the image. With 3 RGB channels, a more complex model architecture will be needed. Since the signs can be distinguished just by features alone, a grayscale image is preferable.
#
# Here is an example of a traffic sign image before and after grayscaling.
#
# ![alt text][image4]
#
# ##### Normalising
# As a last step, I normalized the image data because it ensures that the data is on the same scale.
# **Before**
# Mean of train set: 82.677589037
# Mean of valid set: 83.5564273756
# **After**
# Normalized mean of train set: -0.354081335648
# Normalized mean of valid set: -0.347215411128
#
# ##### Resampling
# I decided to generate additional data because when I played around with different architectures, I couldn't achieve any higher accuracy than 0.93. I researched ways to improve the accuracy, and resampling was one of the ways. When I looked at the training set histogram, I saw that some classes were underrepresented (they had too few samples). Hence, I generated additional data by creating altered copies of the current samples.
#
# To add more data to the data set, I altered existing images by tilting, zooming, rotating and translating. This was done by using OpenCV functions.
# **Translating**
# ![alt text][image5]
# **Zooming**
# ![alt text][image6]
# **Rotating**
# ![alt text][image7]
# **Tilting**
# ![alt text][image8]
#
# ##### New Data Set
# If the samples for a particular class were fewer than 250, new samples for that class would be generated until the number is 4 times as much.
# The classes with fewer than 500 samples were multiplied by 2.
# Other sample sizes remained the same.
#
# New Training Histogram
# ![alt text][image9]
# New Validation Histogram
# ![alt text][image10]
#
# #### 2.
Describe what your final model architecture looks like including model type, layers, layer sizes, connectivity, etc.) Consider including a diagram and/or table describing the final model. # # My final model consisted of the following layers: # # | Layer | Description | # |:---------------------:|:---------------------------------------------:| # | Input | 32x32x1 RGB image | # | 1. Convolution | 1x1 stride, same padding, outputs 28x28x6 | # | RELU | | # | Max pooling | 2x2 stride, outputs 14x14x6 | # | 2. Convolution | 1x1 strides, same padding, outputs 10x10x16 | # | RELU | | # | Max pooling | 2x2 stride, outputs 5x5x16 | # | 3. Convolution | 1x1 strides, same padding, outputs 1x1x400 | # | RELU | | # | Drop out | Prob = 0.5 | # | Add conv2 and conv3 outputs | Output 800 | # | Fully connected | Output 200 | # | Fully connected | Output 43 | # # # #### 3. Describe how you trained your model. The discussion can include the type of optimizer, the batch size, number of epochs and any hyperparameters such as learning rate. # # | Parameter | Value | # |:---------------------:|:---------------------------------------------:| # | Learning rate | 0.0009 | # | Epochs | 25 | # | Patch Size | 150 | # | Mean | 0 | # | Std. dev. | 0.1 | # # # #### 4. Describe the approach taken for finding a solution and getting the validation set accuracy to be at least 0.93. Include in the discussion the results on the training, validation and test sets and where in the code these were calculated. Your approach may have been an iterative process, in which case, outline the steps you took to get to the final solution and why you chose those steps. Perhaps your solution involved an already well known implementation or architecture. In this case, discuss why you think the architecture is suitable for the current problem. 
#
# My final model results were:
# * Validation set accuracy of **0.969**
# * Test set accuracy of **0.921**
#
# An iterative approach was chosen:
# * At first I tried the suggested LeNet structure and it gave a decent result of 0.890 accuracy.
# * The accuracy wasn't sufficient for this project, hence I tried adding an additional convolutional layer, and an additional fully connected layer. The accuracy of this model was around 0.92.
# * I then decided to try out the suggested architecture provided in the [paper](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). I copied the model architecture displayed in figure 2 and it gave an accuracy of somewhere close to 0.93. I tried changing different parameters, like changing batch size and the number of epochs, but it couldn't surpass the 0.93 threshold. Hence, I decided to add more samples to the training and validation set. This improved the accuracy significantly to where it is now.
#
#
# ### Test a Model on New Images
#
# #### 1. Choose five German traffic signs found on the web and provide them in the report. For each image, discuss what quality or qualities might be difficult to classify.
#
# Here are five German traffic signs that I found on the web (cropped and resized to 32x32 px):
#
# ![alt text][image11] ![alt text][image12] ![alt text][image13]
# ![alt text][image14] ![alt text][image15]
#
# Initially I cropped the images so that the sign occupied the whole image. However, it gave a poor performance of 0.4 accuracy. Hence, I had to recrop the images so that there is some margin on the sides. To ensure that the model can classify the signs correctly, next time more zoomed-in images should be generated to prevent this issue.
#
# The road works sign seems to be hard for the model to classify. It could be due to a patch of dirt on it which isn't visible (now that it has been resized).
It also appears to me that the images are having some noise on it due to the jpeg format, and the quality of the image seems to be worse when compared to the provided data set. # # #### 2. Discuss the model's predictions on these new traffic signs and compare the results to predicting on the test set. At a minimum, discuss what the predictions were, the accuracy on these new predictions, and compare the accuracy to the accuracy on the test set (OPTIONAL: Discuss the results in more detail as described in the "Stand Out Suggestions" part of the rubric). # # Here are the results of the prediction: # # | Image | Prediction | # |:---------------------:|:---------------------------------------------:| # | 30 km/h | 30 km/h | # | Stop Sign | Stop Sign | # | Children Crossing | Children Crossing | # | Bicycle Crossing | Bicycle Crossing | # | Road works | Right-of-way at the next intersection | # # # The model was able to correctly guess 4 of the 5 traffic signs, which gives an accuracy of 80%. This is poor compared to the tested accuracy of 96.9%. This could be due to the image quality. # # #### 3. Describe how certain the model is when predicting on each of the five new images by looking at the softmax probabilities for each prediction. Provide the top 5 softmax probabilities for each image along with the sign type of each probability. (OPTIONAL: as described in the "Stand Out Suggestions" part of the rubric, visualizations can also be provided such as bar charts) # # The code for making predictions on my final model is located in the 2nd last code block of the Ipython notebook. # # For the first image, the model is relatively sure that this is a stop sign (probability of 0.6), and the image does contain a stop sign. 
The top five soft max probabilities were # # Image 1: 30 km/h # # | Probability | Prediction | # |:---------------------:|:---------------------------------------------:| # | 1.00 | 30 km/h | # | .00 | 50 km/h | # | .00 | 80 km/h | # | .00 | 20 km/h | # | .00 | End of all speed and passing limits | # # Image 2: Stop Sign # # | Probability | Prediction | # |:---------------------:|:---------------------------------------------:| # | 1.00 | Stop Sign | # | .00 | Keep Right | # | .00 | 70 km/h | # | .00 | 20 km/h | # | .00 | No Entry | # # Image 3: Children Crossing # # | Probability | Prediction | # |:---------------------:|:---------------------------------------------:| # | 1.00 | Children Crossing | # | .00 | Dangerous curve to the right | # | .00 | Bicycle Crossing | # | .00 | Slippery Road | # | .00 | Pedestrians | # # Image 4: Bicycle Crossing # # | Probability | Prediction | # |:---------------------:|:---------------------------------------------:| # | 1.00 | Bicycle Crossing | # | .00 | Children Crossing | # | .00 | Road narrows on the right | # | .00 | 60 km/h | # | .00 | Slippery Road | # # Image 5: Road works # # | Probability | Prediction | # |:---------------------:|:---------------------------------------------:| # | .99 | Right-of-way at the next intersection | # | .01 | Dangerous curve to the right | # | .00 | Beware of ice/snow | # | .00 | Road works | # | .00 | Pedestrians | # # # Displayed results: # ![alt text][image16] # # The prediction for "Road works" sign is completely incorrect. # #
Write_up_template.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.9 64-bit
#     name: python3
# ---

# # Overview
#
# This Jupyter Notebook takes in data from a Google Sheet that contains line change details and their associated high level categories and outputs a JSON file for the MyBus tool.
#
# The output file is used by the MyBus tool's results page and contains the Line-level changes that are displayed there.
#
# Run all cells to generate: `lines-changes.json`

# +
import pandas as pd

# earlier revisions of the published sheet, kept for reference
# GOOGLE_SHEET_URL = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQq0095iOV4dn5McH5IgL4tfjBGLRpCS4XIw-TsZKXubWLyycCfbmnyWdDJRr73ctUMjv32DvKmvVbj/pub?output=csv'
# GOOGLE_SHEET_URL = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQKADb-wnackdlDZwEF0mTpLPh7MpkI4YQV5gv1TYOzltjiGAXcj35GTb4ftP7yKN8mH74MWLPkSUlq/pub?output=csv'
GOOGLE_SHEET_URL = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vSENm-oLTxuzcQUX_0tZ9X0Q2_HIudg1hi5p0MMauqWoHCuomsxb6H6AhqOkaeBY-X1ZKBTbFAzDKUM/pub?output=csv'

DATA_INPUT_PATH = '../data/input'
DATA_OUTPUT_PATH = '../data/'

# +
# pull the line-change sheet, keeping only the listed columns
# NOTE(review): `usecols` is given a set, so pandas keeps the columns in their
# CSV order, not the order written here; the positional rename below assumes
# that CSV order matches the new-name list — verify against the sheet layout.
line_changes = pd.read_csv(GOOGLE_SHEET_URL, usecols={'Line Number', 'Line Label', 'Line Description', 'Route changes','Other changes','Schedule Changes','Stop Cancellations', 'Lines Merged', 'Line Discontinued','Details', 'Service', 'Route', 'Schedule', 'Current Schedule URL'})

line_changes.columns = ["line-number","line-label","line-description",'route-changes','other-changes','schedule-changes','stop-cancellations',"lines-merged","line-discontinued","details","card-1","card-2","card-3","current-schedule-url"]
line_changes = line_changes.fillna('')
line_changes.head()

# +
# import shutil
import os

#define the folders to look through
folders = os.listdir("../files/schedules")

#set an array for the file types
pdfs_list = []

#create a list of file types
# schedule PDFs appear to be named like "<line>-<line>_TT....pdf": strip spaces,
# drop everything from "_TT" on, and split on "-" to get the covered line numbers
for root, dirs, files in os.walk("../files/schedules"):
    for filename in files:
        lines = filename.replace(" ","").split("_TT")[0].split("-")
        for line in lines:
            this_schedule = {}
            # lstrip("0") drops zero-padding so the number matches the sheet's values
            this_schedule['line-number'] = line.lstrip("0")
            this_schedule['schedule-url'] = "./files/schedules/"+filename
            pdfs_list.append(this_schedule)
#             print(line)

# print(pdfs_list)
schedule_df = pd.DataFrame(pdfs_list)
schedule_df.tail(10)
# -

# align the join-key dtypes so the merge matches on integers
schedule_df['line-number'] = schedule_df['line-number'].astype(int)
line_changes['line-number'] = line_changes['line-number'].astype(int)
merged_lines = line_changes.merge(schedule_df, on=['line-number'],how='outer').fillna('')
merged_lines

# +
merged_lines.to_json(DATA_OUTPUT_PATH + 'line-changes.json', orient='records')

# As of 8/16/21 - total should be 125 lines.
print(str(len(merged_lines)) + ' lines')
# -

#
notebooks/line-changes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="uJawRTaqSyR3"
import numpy as np
import math


# + id="fM-tTUyZTAKp"
def legendre_poly(n):
    """Return the coefficients (highest degree first) of the degree-n
    Legendre polynomial P_n.

    Uses Bonnet's recursion n*P_n = (2n-1)*x*P_{n-1} - (n-1)*P_{n-2},
    built bottom-up so the cost is O(n^2) instead of the exponential
    blow-up of the naive double recursion.

    :param n: non-negative polynomial degree
    :return: list of n+1 coefficients of P_n
    """
    if n == 0:
        return [1]
    prev = [1]      # coefficients of P_0
    curr = [1, 0]   # coefficients of P_1
    for k in range(2, n + 1):
        # x * P_{k-1} -> append a zero; pad P_{k-2} on the left to the same length
        a = np.array(curr + [0])
        b = np.array([0, 0] + prev)
        prev, curr = curr, list(((2 * k - 1) * a - (k - 1) * b) / k)
    return curr


# + id="n_eV9rBiTGlP"
def Gauss_Legendre(n):
    """Compute the n-point Gauss-Legendre nodes and weights on [-1, 1].

    The nodes are the roots of P_n; the weights solve the moment equations
    sum_j w_j * x_j**i = integral of x**i over [-1, 1] for i = 0..n-1.

    :param n: number of quadrature points
    :return: (x, w) where x has shape (n,) and w has shape (n, 1)
    """
    x = np.roots(np.array(legendre_poly(n)))
    A = np.zeros((n, n))
    b = np.zeros((n, 1))
    A[0, :] = np.ones(n)
    b[0] = 2
    for i in range(1, n):
        A[i, :] = x ** i                         # i-th moment row, vectorized
        b[i] = 0 if i % 2 == 1 else 2 / (i + 1)  # odd moments vanish by symmetry
    # solve the linear system directly; better conditioned than inv(A) @ b
    w = np.linalg.solve(A, b)
    return x, w


# + id="VkV2XjXSTH9G"
def Gauss_Legendre_quadrature(f, n):
    """Approximate the integral of f over [-1, 1] with n-point Gauss-Legendre.

    :param f: callable taking a scalar argument
    :param n: number of quadrature points
    :return: scalar approximation of the integral
    """
    x, w = Gauss_Legendre(n)
    y = [f(xi) for xi in x]
    return np.matmul(np.transpose(y), w)[0]


# + colab={"base_uri": "https://localhost:8080/"} id="B9M3MDBdTJVA" outputId="3a0b6992-b39b-483c-b360-026dd98c7ed9"
exact = math.exp(1)-math.exp(-1)
print(abs(Gauss_Legendre_quadrature(math.exp,4) - exact))
print(abs(Gauss_Legendre_quadrature(math.exp,8) - exact))
Numerical_quadrature_gaussian.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1 align=center>Starbucks Project</h1> # <h2 align=center>Machine Learning Nanodegree Capstone</h2> # <hr style="border: 1px solid black"> # # ## I. Definition # # ### Project Overview # # The purpose of this project is to analyze from simulated Starbucks' customers data provided by Starbucks and Udacity, in order to gain insight on the relationship of the customers' attributes and their response to promotional offers being given to them. # # Once in a while, Starbucks sends promotional offer to its mobile customers and the data gained from it are being used to simulate the dataset this project is based on. # # From a business perspective, it is important to understand whether an offer is effective and how to personalize offers based on customers' attributes. This personalization could improve the efficacy of the promotional offer itself and might even increase the revenue, if more people are being attracted to buy based on that personalized offer. # # Some research has been conducted using machine learning model to classify things based on marketing data. It is a good practice to learn from them before solving problems in the marketing area and using the marketing data. 
The following are some of them:
#
# * [https://www.researchgate.net/publication/282657577_Marketing_Research_Data_Classification_by_Means_of_Machine_Learning_Methods](https://www.researchgate.net/publication/282657577_Marketing_Research_Data_Classification_by_Means_of_Machine_Learning_Methods)
# * [https://www.researchgate.net/publication/260707025_Using_Neural_Networks_for_Marketing_Research_Data_Classification](https://www.researchgate.net/publication/260707025_Using_Neural_Networks_for_Marketing_Research_Data_Classification)
#
#
# Also, this project is a great fit for students of Data Science or Machine Learning to tinker on, since it would widen their experience on a different kind of dataset and also allow them to engineer features that matter and an algorithm that would perform best.
#
# ### Problem Statement
# > Would a customer respond to a particular offer?
#
# * The problem of this project would be a classification problem: there needs to be a classification of whether a promotional offer is going to make a customer respond or not.
# * An approach to this problem would be to see whether a pattern emerges from customers' attributes and the promotional offer's data (duration, rewards, etc.) to determine whether a customer would respond to a promotional offer: the customers' attributes and the promotional offer's data would be the inputs, and a binary classification of responding or not would be the output.
# * Thus, a model needs to be built based on those inputs and it is expected to output a binary classification: whether a customer would respond (with the value of `1`) or not (with the value of `0`).
#
# ### Metrics
# The metric to be used for the evaluation of this project would be the accuracy level, since it is more important to maximize the true positives and true negatives (whether an offer would get a response), rather than to minimize the false positives or false negatives (customers get an offer s/he would not respond to).
# # Additionally, a preliminary data exploration suggests a slight imbalance, but still not large enough for the accuracy metrics to render to be a bad metric. Even so, an F1 metric would also be analyzed further to complement the accuracy measure. # ## II. Analysis # # ### Data Exploration # import python libraries import pandas as pd import numpy as np # read in the json files portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True) profile = pd.read_json('data/profile.json', orient='records', lines=True) transcript = pd.read_json('data/transcript.json', orient='records', lines=True) # *** # #### 1. Portfolio Data Exploration # view the portfolio data portfolio # get the high level view of the portfolio data portfolio.describe() # get the data types of the portfolio data portfolio.dtypes # get value counts of offer types portfolio['offer_type'].value_counts() # **Remarks**: Portfolio dataset only has ten data points or offers, thus it is easy to explore the data without using any python coding. From the overview of the data above, it can be seen that `channels` contains a list of channels where the promotional offers are sent with and that the `offer_type` contains categorical value of what kind of offer it is. Both of those columns could be expanded by using the one-hot encoding method in the data preprocessing step. # # Also, there is no abnormalities in the values of the data. One thing to notice would be that the `informational` offer does not give any kind of rewards to the customers: it might contain only informational news, e.g. highlighting the product features, as the type suggests. # *** # #### 2. 
Profile Data Exploration # view the profile data print(f'The shape of the dataset: {profile.shape}') profile.head(10) # get the high level view of the profile data profile.describe() # get the data types of the profile data profile.dtypes # get missing value data per column for column in profile.columns: missing_count = profile[column].isnull().sum() print(f'Number of missing values in {column}: {missing_count} ({missing_count / profile.shape[0]} of the total data)') # get value counts of gender print('Value counts of gender in profile dataset:') profile['gender'].value_counts() # get value counts of id, in case there are any duplicates profile['id'].value_counts().sort_values(ascending=False).iloc[:10] # **Remarks**: There are 17,000 data points or customers in the profile dataset. Out of that number, there are 2,175 missing values or around 12.79% from the `gender` and `income` columns. # # For the missing values in the income column, I would impute it with the median income, as it would predict the central tendency of the income column. The trade-off is that there would be less variation from the income feature: the income feature would have less predictive power. # # However, since it only comprises of 12.79% of the total data, it might prove useful to still include these data points and impute the missing value, since removing them might decrease the variation of the other features, such as the age feature. # # For the missing values in the gender column, they would be taken into account by the use of one-hot encoding for the gender column: inferred by having 0 value in the M, F, and O columns (all the values in the gender column). I deliberately not imputing anything, since it might be useful to assume that an absence of the value here might have a predictive quality. 
# # Also, for the `became_member_on` column, since the values are now on a format of integer, they would be first processed by casting it into a datetime format, then they would be formatted further as timestamp, and then they would be scaled or normalized. # # Further, there are age values of `118` quite often showing in the data point which has missing value in the gender and income columns. This might be a placeholder value for data point with missing data. Depending on the distribution, which is going to be explored in the below step, I might replace these values with the median age instead. # *** # #### 3. Transcript Data Exploration # view the profile data print(f'The shape of the dataset: {transcript.shape}') transcript.head(10) # get the high level view of the profile data transcript.describe() # get the data types of the profile data transcript.dtypes # get missing value data per column for column in transcript.columns: missing_count = transcript[column].isnull().sum() print(f'Number of missing values in {column}: {missing_count} ({missing_count / transcript.shape[0]} of the total data)') # get value counts of event print('Value counts of event in transcript dataset:') transcript['event'].value_counts() # + from tqdm import tqdm from pprint import pprint # get a view on the value column, based on the event value value_event_set = set() idx = 0 for value in tqdm(transcript['value']): value_keys = tuple(value.keys()) event = transcript.iloc[idx]['event'] value_event_set.add((event, value_keys)) idx += 1 print("The set of combinations of event and value columns' values in the data set") pprint(value_event_set) # - # **Remarks**: There seems to be no missing values in this dataset, which is great. However, there are some tricky structure of the values, which corresponds exactly on what the event type is, e.g. there are two ways to get the offer ID: `offer_id` and `offer id`, depending on the value of the event. 
This would be relevant in how to preprocess the data to get the data points which are to be the input of the models to be trained below. # # In general, I would like to produce a dataset whose data points consist of costumer's attributes, the offer's attributes, and the class of whether the customer responds. A response would then be defined as whether an offer being received by the customer (inferred from the `offer_id` key in the value column and the `offer received` value in the event column) ended up being used by the customer (inferred from the `offer id` key in the value column and the `offer completed` value in the event column). # # Thus, I might need to produce new columns for the `offer_id` inferred from the `value` column in this dataset and it might be the case that the key of `reward` and `amount` would not be included, since we only care about whether the customers respond, not how much revenue from transaction has an offer generated. # ### Exploratory Visualization # import python libraries import matplotlib.pyplot as plt import seaborn as sns # *** # #### 1. Portfolio Data Visualization # + # get visual counts of offer types offer_types = portfolio['offer_type'].value_counts() offer_types_index = offer_types.index plt.title('Offer Type Counts') plt.xlabel('Offer Type') plt.ylabel('Counts') sns.barplot(offer_types_index, list(offer_types)); # - # **Remarks**: There are four bogos, four discounts, and two informationals type of offers in the portfolio dataset. The imbalance here (of the informational type being only half as many as the others) does not really relevant, since the relevant one would be the imbalance of the data for each label class (the respond and the non-repond class). # *** # #### 2. 
Profile Data Visualization # + # get the visual of customers' age customer_age = list(profile['age']) plt.title('Distribution of Customer Age') plt.xlabel('Age') plt.ylabel('Density') sns.distplot(customer_age); # - # **Remarks**: It is highly likely that the age of `118` found above is the result of a placeholder value assigned whenever there are no data for the customer (gender and income). Thus, I will replace the value with the median age instead. # + # get visual counts of customers' gender customer_gender = profile['gender'].value_counts() customer_gender_index = customer_gender.index plt.title('Customer Gender Counts') plt.xlabel('Gender') plt.ylabel('Counts') sns.barplot(customer_gender_index, list(customer_gender)); # - # **Remarks**: There is imbalance by gender, but there should be no relevant consequences for this, as the only relevant imbalance would be the one based on the class label. # + # get the visual of customers' income customer_income = profile['income'] customer_income = customer_income.dropna() # drop missing values (NaN) plt.title('Distribution of Customer Income') plt.xlabel('Income') plt.ylabel('Density') sns.distplot(customer_income); # - # **Remarks**: The income data is quite good: it could resemble a rightly-skewed distribution. Thus standardizing the values of this column could be done without any other preprocessing step. 
# + # get the visual of customers' became_member_on by date customer_join_date = pd.to_datetime(profile['became_member_on'], format='%Y%m%d') plt.figure(figsize=(15, 7)) plt.title('Distribution of Customer Date of Join by Day') fig = sns.countplot(customer_join_date) fig.set_xticklabels(fig.get_xticklabels(), rotation=90); # + # get the visual of customers' became_member_on by month customer_join_date = pd.to_datetime(profile['became_member_on'] // 100, format='%Y%m') # integer division to obtain month plt.figure(figsize=(15, 7)) plt.title('Distribution of Customer Date of Join by Month') fig = sns.countplot(customer_join_date) fig.set_xticklabels(fig.get_xticklabels(), rotation=90); # - # **Remarks**: The distribution of the customer date of join is rather imbalance: the early dates have few customer join counts, while the later dates have many. On whether to use day or month casting, it might be useful to just stick with the day format, as it would result in a more granular value when doing scaling on the preprocessing step. # *** # #### 3. Transcript Data Visualization # + # get visual counts of events' type event_type = transcript['event'].value_counts() event_type_index = event_type.index plt.title('Event Type Counts') plt.xlabel('Event Type') plt.ylabel('Counts') sns.barplot(event_type_index, list(event_type)); # - # **Remarks**: The number of offers being used is about a half of the number of offers given. However, it cannot be assumed that the offers used are the result the offer being given to customer: the customer might not even be aware of the offer s/he received, right until after the transaction. This would be relevant in the way I am going to define what counts as a customer responding to an offer: s/he needs to be aware of it (offer viewed) before the transaction occurs. 
# + # get the visual of the events' time event_time = transcript['time'] plt.title('Distribution of Event by Time since Offers Given (in Hour)') plt.xlabel('Hour') plt.ylabel('Density') sns.distplot(list(event_time)); # - # **Remarks**: There seems to be a pattern of events based on the hour after the offers given. The huge spike in the beginning might be dominated by the events of offers being sent to the users. The other spikes would be unknownable by direct observation, but it suggests that this data might be a good feature in determining response for time-series forecasting. # # Thus, it would not be a relevant feature then for this project's problem, since we would like to know whether a customer would respond: the problem is agnostic to *when* does the customer responds or whether s/he responds *given a time* after an offer is received. # ### Algorithms and Techniques # 1. Preprocess the data: the rationale are provided from the data exploration above. # * Dropping duplicate rows from all datasets # * Impute missing values of customers income with the median of income data. # * Replace customers age of `118` with the median of age data. # * One-hot encode these columns: customer gender, offer channel, and offer type. # * Cast `became_member_on` column values into a timestamp of day format. # * Replace the `value` column of the transcript DF with the `offer_id` column with values inferred. # * Group the joined dataframe on customer ID then offer ID. # * Join the grouped dataframe with portfolio and profiles dataframes to add customers and offers attributes. # * Add labels to the data points by inferring response only to those customers who have received, viewed, and complete the offer. # * Min-max scale the values of the numerical columns (excluding the one-hot encoded). # * Split the data into train and test dataset. # 2. Obtain a benchmark result # * Train a basic Naive Bayes algorithm to obtain a benchmark result. # 3. 
Train other algorithm # * Train the following algorithms with grid-search and cross-validation: # * Logistic Regression # * Grid search parameters: # * penalty: ['l1', 'l2'] # * C: [1, 10, 100, 1000] # * max_iter: [25, 50, 100]} # * Cross validation fold: 5 # * Decision Tree # * Grid search parameters: # * criterion: ['gini', 'entropy'] # * max_depth: [None, 2, 5] # * min_samples_split: [2, 5, 10]} # * Cross validation fold: 5 # * Random Forest # * Grid search parameters: # * n_estimators: [25, 50, 100] # * criterion: ['gini', 'entropy'] # * max_depth: [None, 2, 5] # * min_samples_split: [2, 5, 10]} # * Cross validation fold: 5 # * Support Vector Machine # * Grid search parameters: # * kernel: ['linear', 'rbf'] # * C: [1, 10, 100] # * max_iter: [-1, 15, 30]} # * Cross validation fold: 5 # 4. Determine the model with the best result # ### Benchmark # A Naive Bayes algorithm would be used to be the benchmark algorithm in predicting whether a customer would respond to a particular offer. This is due to that Naive Bayes is a basic algorithm in solving a binary classification problem, without any presumption on what algorithm would best predict the classification, such as that of the problem statement above. # ## III. Methodology # # ### Data Preprocessing # *** # #### Dropping duplicates # view the portfolio dataset portfolio # **Remarks on Portfolio Dataset**: There is no need to drop duplicates on the portfolio dataset, since it can be seen in the above DF, that there is no duplicate rows in the dataframe. 
# + # Dropping duplicates on profile dataset raw_profile_shape = profile.shape profile_clean = profile.drop_duplicates() # check the before & after shapes print(f'Shape of the raw profile data: {raw_profile_shape}') print(f'Shape of the clean profile data: {profile_clean.shape}') # + # Dropping duplicates on transcript dataset raw_transcript_shape = transcript.shape transcript_clean = transcript.copy() # freeze the value column's values first, to enable dropping duplicates transcript_clean['value'] = transcript_clean['value'].transform(lambda x: frozenset(x.items())) transcript_clean = transcript_clean.drop_duplicates() # change the values back to a dictionary transcript_clean['value'] = transcript_clean['value'].transform(lambda x: dict(x)) # check the before & after shapes print(f'Shape of the raw transcript data: {raw_transcript_shape}') print(f'Shape of the clean transcript data: {transcript_clean.shape}') # - # *** # #### Impute missing values # + # get the median income median_income = profile_clean['income'].median() # fill the nan in the income column profile_clean['income'] = profile_clean['income'].fillna(median_income) # view the top 10 of the clean profile DF profile_clean.head(10) # - # *** # #### Replace invalid age values # + # get the median age, excluding the 118 median_age = profile_clean[~(profile_clean['age'] == 118)]['age'].median() profile_clean['age'] = profile_clean['age'].replace(118, median_age) # - # view the top 10 of the clean profile DF profile_clean.head(10) # *** # #### One-hot encoding categorical values # + # get one hot encoding on customers' gender gender_dummies = pd.get_dummies(profile_clean['gender'], prefix='gender', dummy_na=True, drop_first=True) # concatenate the gender dummies to the clean profile DF, then drop the gender column profile_clean = pd.concat([profile_clean, gender_dummies], axis=1).drop('gender', axis=1) # view the top 10 of the clean profile DF profile_clean.head(10) # + # get one hot encoding on offers' 
channel # courtesy to : https://stackoverflow.com/questions/29034928/pandas-convert-a-column-of-list-to-dummies channels_dummies = pd.get_dummies(portfolio['channels'].apply(pd.Series).stack(), prefix='channel').sum(level=0).drop('channel_web', axis=1) # get one hot encoding on the offers' type type_dummies = pd.get_dummies(portfolio['offer_type'], prefix='type', drop_first=True) # # copy the portfolio DF portfolio_clean = portfolio.copy() # concatenate the dummies to the clean portfolio DF, then drop the channels & type column portfolio_clean = pd.concat([portfolio_clean, channels_dummies, type_dummies], axis=1).drop(['channels', 'offer_type'], axis=1) # view the clean portfolio DF portfolio_clean # - # *** # #### Cast `became_member_on` values # + # cast the became_member_on integer to timestamp profile_clean['became_member_on'] = pd.to_datetime(profile_clean['became_member_on'], format='%Y%m%d').transform(lambda x: pd.Timestamp.timestamp(x) // 86400) # integer divide by the number of seconds in a day # view the top 10 of the clean profile DF profile_clean.head(10) # - # *** # #### Replace the `value` column with `offer_id` values # + # parse the value column to be in a dataframe format values_df = transcript_clean['value'].apply(pd.Series) # view the top 10 of the values DF values_df.head(10) # - # check the shape of the values_df: of whether it corresponds well with the transcript_clean DF print(f'Shape of values_df: {values_df.shape}') print(f'Shape of transcript_clean: {transcript_clean.shape}') # + # join the "offer_id" and "offer id" columns offer_id = values_df['offer id'].replace(np.nan, '') + values_df['offer_id'].replace(np.nan, '') # replace the empty string again to NaN as to not creating new offer id of an empty string when joined offer_id = offer_id.replace('', np.nan) # see the top 10 of offer_id offer_id[:10] # + # concat the offer_id to the transcript dataframe transcript_clean = pd.concat([transcript_clean, offer_id], axis=1) 
transcript_clean.columns = list(transcript_clean.columns)[:-1] + ['offer_id']  # change name of the offer_id column
transcript_clean = transcript_clean.drop('value', axis=1)

# view the top 10 of the clean transcript DF
transcript_clean.head(10)
# -

# ***
# #### Grouped transcript based on customer ID and offer ID and infer response label

# one-hot encode event values to infer response
event_dummies = pd.get_dummies(transcript_clean['event'])[['offer completed', 'offer received', 'offer viewed']]  # only get the offer events

# +
# concat the event dummies to the merged df
transcript_clean_dummies = pd.concat([transcript_clean, event_dummies], axis=1).drop('event', axis=1)

# view the top 10 of transcript_clean_dummies
transcript_clean_dummies.head(10)

# +
# group based on customer ID and offer ID
# NOTE: the aggregated columns are selected with a list — tuple selection after
# groupby (df.groupby(...)['a', 'b']) is deprecated and removed in modern pandas.
transcript_grouped = transcript_clean_dummies.groupby(['person', 'offer_id'])[['offer received', 'offer viewed', 'offer completed']].sum().reset_index()

# view the top 10 of the transcript_grouped
transcript_grouped.head(10)

# +
# infer label: whether a customer responds to an offer (1) or not (0).
# A response requires the offer to have been received, viewed AND completed at
# least once. The vectorized boolean expression below replaces the original
# row-by-row iterrows() loop — same result, O(n) pandas-native instead of a
# slow Python-level iteration, and it already yields a Series aligned with
# transcript_grouped's RangeIndex.
offer_used = ((transcript_grouped['offer received'] > 0)
              & (transcript_grouped['offer viewed'] > 0)
              & (transcript_grouped['offer completed'] > 0)).astype(int)

# +
# concat the label to the transcript_grouped DF
transcript_grouped = pd.concat([transcript_grouped, offer_used], axis=1)
transcript_grouped.columns = list(transcript_grouped.columns)[:-1] + ['offer_used']  # rename the column name
transcript_grouped = transcript_grouped.drop(['offer received', 'offer viewed', 'offer completed'], axis=1)  # drop the offer event dummy columns

# view the top 10 of the transcript_grouped
transcript_grouped.head(10)
# -

# ***
# #### Join dataframes

# +
# match
the join key column name profile_clean.columns = ['age', 'became_member_on', 'person', 'income', 'gender_M', 'gender_O', 'gender_nan'] portfolio_clean.columns = ['difficulty', 'duration', 'offer_id', 'reward', 'channel_email', 'channel_mobile', 'channel_social', 'type_discount', 'type_informational'] # check the columns renaming print('Profile DF') print(profile_clean.head()) print('--------------------') print('Portfolio DF') print(portfolio_clean.head()) # + # join the DFs join_df = pd.merge(transcript_grouped, profile_clean, how='left', on='person') join_df = pd.merge(join_df, portfolio_clean, how='left', on='offer_id') # view the top 10 of merged_df join_df.head(10) # - # *** # #### Standardize the values # + from sklearn.preprocessing import MinMaxScaler # instantiate the min-max scaler scaler = MinMaxScaler() # - # train the scaler features = join_df[join_df.columns[3:]] scaler.fit(features) # + # transform the features features_scaled = scaler.transform(features) features_scaled_df = pd.DataFrame(features_scaled, columns=features.columns) # view the top 10 scaled features DF features_scaled_df.head(10) # - # *** # #### Split data into train and test datasets from sklearn.model_selection import train_test_split # + # get label y = join_df['offer_used'] # split the data X_train, X_test, y_train, y_test = train_test_split(features_scaled,y, test_size=.25, random_state=42) # - print(f'Shape of the X_train: {X_train.shape}') print(f'Shape of the X_test: {X_test.shape}') print(f'Shape of the y_train: {y_train.shape}') print(f'Shape of the y_test: {y_test.shape}') # ### Implementation # #### Train & evaluate the benchmark model from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.metrics import classification_report, accuracy_score, make_scorer from sklearn.naive_bayes import GaussianNB # train the benchmark, Logistic Regression model nb_clf = GaussianNB() nb_clf.fit(X_train, y_train) # + # evaluate the model y_prediction = 
nb_clf.predict(X_test) accuracy = accuracy_score(y_test, y_prediction) print(f'Accuracy: {accuracy}') print() print(classification_report(y_test, y_prediction)) # - # ### Refinement # # #### Train & evaluate Logistic Regression algorithm # + from sklearn.linear_model import LogisticRegression # train the benchmark, Logistic Regression model lr_tuned_params = {'penalty': ['l1', 'l2'], 'C': [1, 10, 100, 1000], 'max_iter': [25, 50, 100]} scoring = ['accuracy', 'f1'] # train and evaluate grid search and cross validation results # courtesy to https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html for score in scoring: print("# Tuning hyper-parameters for %s" % score) print() lr_clf = GridSearchCV( LogisticRegression(), lr_tuned_params, scoring=score, cv=5 ) lr_clf.fit(X_train, y_train) print("Best parameters set found on training set:") print() print(lr_clf.best_params_) print() print("Grid scores on training set:") print() means = lr_clf.cv_results_['mean_test_score'] stds = lr_clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, lr_clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full training set.") print("The scores are computed on the full test set.") print() y_true, y_pred = y_test, lr_clf.predict(X_test) print(classification_report(y_true, y_pred)) print() # - # #### Train & evaluate Decision Tree algorithm # + from sklearn.tree import DecisionTreeClassifier # train the benchmark, Logistic Regression model dt_tuned_params = {'criterion': ['gini', 'entropy'], 'max_depth': [None, 2, 5], 'min_samples_split': [2, 5, 10]} scoring = ['accuracy', 'f1'] # train and evaluate grid search and cross validation results # courtesy to https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html for score in scoring: print("# Tuning hyper-parameters for %s" % 
score) print() dt_clf = GridSearchCV( DecisionTreeClassifier(), dt_tuned_params, scoring=score, cv=5 ) dt_clf.fit(X_train, y_train) print("Best parameters set found on training set:") print() print(dt_clf.best_params_) print() print("Grid scores on training set:") print() means = dt_clf.cv_results_['mean_test_score'] stds = dt_clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, dt_clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full training set.") print("The scores are computed on the full test set.") print() y_true, y_pred = y_test, dt_clf.predict(X_test) print(classification_report(y_true, y_pred)) print() # - # #### Train & evaluate Random Forest algorithm # + from sklearn.ensemble import RandomForestClassifier # train the benchmark, Logistic Regression model rf_tuned_params = {'n_estimators': [25, 50, 100], 'criterion': ['gini', 'entropy'], 'max_depth': [None, 2, 5], 'min_samples_split': [2, 5, 10]} scoring = ['accuracy', 'f1'] # train and evaluate grid search and cross validation results # courtesy to https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html for score in scoring: print("# Tuning hyper-parameters for %s" % score) print() rf_clf = GridSearchCV( RandomForestClassifier(), rf_tuned_params, scoring=score, cv=5 ) rf_clf.fit(X_train, y_train) print("Best parameters set found on training set:") print() print(rf_clf.best_params_) print() print("Grid scores on training set:") print() means = rf_clf.cv_results_['mean_test_score'] stds = rf_clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, rf_clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full training set.") print("The scores are computed on the full test set.") 
print() y_true, y_pred = y_test, rf_clf.predict(X_test) print(classification_report(y_true, y_pred)) print() # - # #### Train & evaluate Support Vector Machine algorithm # + from sklearn.svm import SVC # train the benchmark, Logistic Regression model svm_tuned_params = {'kernel': ['linear', 'rbf'], 'C': [1, 10, 100], 'max_iter': [-1, 15, 30]} scoring = ['accuracy', 'f1'] # train and evaluate grid search and cross validation results # courtesy to https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html for score in scoring: print("# Tuning hyper-parameters for %s" % score) print() svc_clf = GridSearchCV( SVC(), svm_tuned_params, scoring=score, cv=5 ) svc_clf.fit(X_train, y_train) print("Best parameters set found on training set:") print() print(svc_clf.best_params_) print() print("Grid scores on training set:") print() means = svc_clf.cv_results_['mean_test_score'] stds = svc_clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, svc_clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full training set.") print("The scores are computed on the full test set.") print() y_true, y_pred = y_test, svc_clf.predict(X_test) print(classification_report(y_true, y_pred)) print() # - # ## IV. 
Results # # ### Model Evaluation and Validation # #### Result Summary # * Naive Bayes (Benchmark) # * Accuracy: 0.6026418910377955 # * F1-score: 0.57 # * Logistic Regression # * Best Parameters: {'C': 1, 'max_iter': 25, 'penalty': 'l2'} # * Best Accuracy: 0.753 # * F1-score: 0.76 # * Decision Tree # * Best Parameters: {'criterion': 'gini', 'max_depth': 5, 'min_samples_split': 2} # * Best Accuracy: 0.750 # * F1-score: 0.75 # * Random Forest # * Best Parameters: {'criterion': 'gini', 'max_depth': None, 'min_samples_split': 10, 'n_estimators': 100} # * Best Accuracy: 0.772 # * F1-score: 0.77 # * Support Vector Machine # * Best Parameters: {'C': 100, 'kernel': 'rbf', 'max_iter': -1} # * Best Accuracy: 0.769 # * F1-score: 0.77 # ### Justification # Based on the results summarized above, the best model found turns out to be the one using the algorithm of Random Forest with the parameters of `criterion='gini', max_depth=None, min_samples_split=10, n_estimators=100` with accuracy of 0.772 and an F1-score of 0.77. # # This result is significantly better than the benchmark one: accuracy of 0.603 and F1-score of 0.57. It is also definitely better than randomly assigning an offer to a customer: a random offer would have a 0.5 chance of getting a response from the customer (naive calculation of binary outcomes: response and non-response). # # As to the problem of determining if an offer would get a customer response, aside from the accuracy of 0.772, the Random Forest model has 0.81 F1-score for the non-response label and 0.70 F1-score for the response label: this is better than the benchmark one of 0.52 F1-score for the non-response label and 0.66 F1-score for the response label, not to mention the naive random way of assigning offers.
Starbucks Capstone Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <small><small><i> # Introduction to Python for Bioinformatics - available at https://github.com/kipkurui/Python4Bioinformatics. # </i></small></small> # # # ## Reproducible Bioinformatics Research # # How can we use Jupyter Notebooks, Conda environments, the Bioconda Channel and GitHub to ensure reproducible Bioinformatics Research? To explore these topics, we'll use various open learning resources online: # - [Bioinformatics best practices](https://github.com/griffithlab/rnaseq_tutorial/wiki/Bioinformatics-Best-Practices) # - [Bioconda promises to ease bioinformatics software installation woes](http://blogs.nature.com/naturejobs/2017/11/03/techblog-bioconda-promises-to-ease-bioinformatics-software-installation-woes/) # - Read the paper: [Bioconda: A sustainable and comprehensive software distribution for the life sciences](https://doi.org/10.1101/207092) # # # ### 1. Conda environments # We've seen how you can create a conda environment. But how can you ensure someone else reproduces your set up? We'll also learn how to create environments for different projects. # # ### 2. Bioconda Channel # # Here, we'll explore some of the useful Bioinformatics packages in this channel, and how we can use them to conduct reproducible research. # # # ### 3. GitHub # You have a reproducible environment and research notebook; how can you version your work and make your research accessible to others? This will be a quick introduction to version control with Git and GitHub.
Notebooks/09.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="x3uxbPbddh1t"
# ## Check the GPU

# + id="Ib92JY4KWXBP"
# %tensorflow_version 2.x
import tensorflow as tf
import timeit

device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')


def cpu():
    """Convolve a random batch on the CPU and return a scalar (forces execution)."""
    with tf.device('/cpu:0'):
        random_image_cpu = tf.random.normal((100, 100, 100, 3))
        net_cpu = tf.keras.layers.Conv2D(32, 7)(random_image_cpu)
        return tf.math.reduce_sum(net_cpu)


def gpu():
    """Same convolution on the GPU, for a wall-clock comparison against cpu()."""
    with tf.device('/device:GPU:0'):
        random_image_gpu = tf.random.normal((100, 100, 100, 3))
        net_gpu = tf.keras.layers.Conv2D(32, 7)(random_image_gpu)
        return tf.math.reduce_sum(net_gpu)


# Warm-up calls: the first invocation pays one-off graph/allocation costs that
# would otherwise skew the timing below.
cpu()
gpu()

# Run the op several times.
print('Time (s) to convolve 32x7x7x3 filter over random 100x100x100x3 images '
      '(batch x height x width x channel). Sum of ten runs.')
print('CPU (s):')
cpu_time = timeit.timeit('cpu()', number=10, setup="from __main__ import cpu")
print(cpu_time)
print('GPU (s):')
gpu_time = timeit.timeit('gpu()', number=10, setup="from __main__ import gpu")
print(gpu_time)
print('GPU speedup over CPU: {}x'.format(int(cpu_time/gpu_time)))

# + [markdown] id="N7hwoMBBdync"
# # Load the dataset

# + id="p3TyuDTj-rxN"
from google.colab import files

# Upload your kaggle.json file with your username and your Kaggle API token.
files.upload()

# + id="X45MxKFo_FxG"
# Let's make sure the kaggle.json file is present.
# !ls -lha kaggle.json

# Next, install the Kaggle API client.
# !pip install -q kaggle

# The Kaggle API client expects this file to be in ~/.kaggle,
# so move it there.
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/

# This permissions change avoids a warning on Kaggle tool startup.
# !chmod 600 ~/.kaggle/kaggle.json

# + id="_wwp82A0Assa"
# %cd /content/drive/My\ Drive/ConditionalVAE_DL_Project3

# + id="9ZfncmuFA9ho"
# !python3 dataloader.py

# + [markdown] id="jHPEAqULod-7"
# # Build the dataset

# + id="BWyewtTroc8J"
from celeba import CelebADataset

# Training configuration
learning_rate = 0.001
train_size = 0.01
batch_size = 32
# True: the test-set image IDs and other useful information will be stored in a
# pickle file for further use (e.g. Image_Generation.ipynb).
save_test_set = True

dataset = CelebADataset(train_size=train_size,
                        batch_size=batch_size,
                        save_test_set=save_test_set)

# + [markdown] id="Cck3nhf67ad7"
# # Define the model

# + id="XhGfANyK7gyr"
# Hyper-parameters
label_dim = 40
image_dim = [64, 64, 3]
latent_dim = 128
beta = 0.65

# + id="oJcUegj6PT58"
import tensorflow as tf
from ConvolutionalCondVAE import ConvCVAE, Decoder, Encoder

# Model
encoder = Encoder(latent_dim)
decoder = Decoder()
model = ConvCVAE(
    encoder,
    decoder,
    label_dim=label_dim,
    latent_dim=latent_dim,
    beta=beta,
    image_dim=image_dim)

# Optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

# + [markdown] id="V2mvjb9jC4Oh"
# # Checkpoint

# + id="pvzfH8T9C6Xj"
import os

# Checkpoint path
checkpoint_root = "./CVAE{}_{}_checkpoint".format(latent_dim, beta)
checkpoint_name = "model"
save_prefix = os.path.join(checkpoint_root, checkpoint_name)

# Define the checkpoint
checkpoint = tf.train.Checkpoint(module=model)

# + id="Uzkw-ICrDQli"
# Restore the latest checkpoint
latest = tf.train.latest_checkpoint(checkpoint_root)
if latest is not None:
    checkpoint.restore(latest)
    print("Checkpoint restored:", latest)
else:
    print("No checkpoint!")

# + [markdown] id="JBfn4EZB9z_i"
# # Training

# + id="DSxE0asz94X3"
import numpy as np
import time
from utils import train_step

train_losses = []
train_recon_errors = []
train_latent_losses = []
loss = []
reconstruct_loss = []
latent_loss = []
step_index = 0
n_batches = int(dataset.train_size / batch_size)
n_epochs = 30

print("Number of epochs: {}, number of batches: {}".format(n_epochs, n_batches))

# Epochs Loop
# BUG FIX: the loop previously ran `range(5)` while declaring and printing
# n_epochs = 30; it now honours the configured number of epochs.
for epoch in range(n_epochs):
    start_time = time.perf_counter()
    dataset.shuffle()  # Shuffling

    # Train Step Loop
    for step_index, inputs in enumerate(dataset):
        total_loss, recon_loss, lat_loss = train_step(inputs, model, optimizer)
        train_losses.append(total_loss)
        train_recon_errors.append(recon_loss)
        train_latent_losses.append(lat_loss)
        if step_index + 1 == n_batches:
            break

    # BUG FIX: average only this epoch's batches. The lists accumulate across
    # epochs, so np.mean over the whole list reported a running mean of all
    # history instead of the per-epoch loss.
    loss.append(np.mean(train_losses[-n_batches:], 0))
    reconstruct_loss.append(np.mean(train_recon_errors[-n_batches:], 0))
    latent_loss.append(np.mean(train_latent_losses[-n_batches:], 0))

    exec_time = time.perf_counter() - start_time
    print("Execution time: %0.3f \t Epoch %i: loss %0.4f | reconstr loss %0.4f | latent loss %0.4f"
          % (exec_time, epoch, loss[epoch], reconstruct_loss[epoch], latent_loss[epoch]))

    # Save progress every 5 epochs
    if (epoch + 1) % 5 == 0:
        checkpoint.save(save_prefix + "_" + str(epoch + 1))
        print("Model saved:", save_prefix)

# Save the final model
checkpoint.save(save_prefix)
print("Model saved:", save_prefix)

# + [markdown] id="Pfl3PjRywq3U"
# # Loss Visualization

# + id="5yfbVKF-vY23"
import matplotlib.pyplot as plt

plt.plot(reconstruct_loss, 'g', marker='o')
plt.grid()
plt.show();

plt.plot(latent_loss, 'b', marker='o')
plt.grid()
plt.show();

plt.plot(loss, 'r', marker='o')
plt.grid()
plt.show();
notebooks/Train_ConditionalVAE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + uuid="2a0489ae-087b-4cb2-b72d-af809d62eae5"
import pandas as pd
import numpy as np

path = './data/'
# BUG FIX: `path` was defined but never used, so the CSVs were read from the
# working directory instead of the declared ./data/ folder.
train_data = pd.read_csv(path + 'train.csv')
test_data = pd.read_csv(path + 'testA.csv')
print('Train data shape:', train_data.shape)
print('TestA data shape:', test_data.shape)

# + uuid="456f4256-ca2e-43c3-8702-bfcf5517b2d7"
train_data.head()

# + uuid="dac9f38e-9380-4f27-8f2e-1f3cf30def64"
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.metrics import f1_score

y_true = [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 0, 0, 0, 0]  # ground-truth labels
y_pred = [1, 1, 1, 3, 3, 2, 2, 3, 3, 3, 4, 3, 4, 3, 5, 1, 3, 6, 6, 1, 1, 0, 6]  # predicted labels

# Accuracy
print("accuracy:", accuracy_score(y_true, y_pred))

# Precision (macro and micro averaged)
print("macro_precision", precision_score(y_true, y_pred, average='macro'))
print("micro_precision", precision_score(y_true, y_pred, average='micro'))

# Recall (macro and micro averaged)
print("macro_recall", recall_score(y_true, y_pred, average='macro'))
print("micro_recall", recall_score(y_true, y_pred, average='micro'))

# F1 (macro and micro averaged)
print("macro_f1", f1_score(y_true, y_pred, average='macro'))
print("micro_f1", f1_score(y_true, y_pred, average='micro'))


# + uuid="9808eaa8-d731-4447-914d-5160f294208a"
def abs_sum(y_pre, y_tru):
    """Sum of absolute differences between the predicted probability matrix
    `y_pre` and the one-hot true-class matrix `y_tru` (the competition metric).

    Both arguments are array-likes of identical shape; returns a scalar.
    """
    y_pre = np.array(y_pre)
    y_tru = np.array(y_tru)
    # Vectorised: one C-level pass replaces the nested Python-level
    # sum(sum(abs(...))) of the original.
    return np.abs(y_pre - y_tru).sum()


# + uuid="6b77357c-cb8d-447c-92e1-158380e12f4f"
y_pre = [[0.1, 0.1, 0.7, 0.1], [0.1, 0.1, 0.7, 0.1]]
y_tru = [[0, 0, 1, 0], [0, 0, 1, 0]]
print(abs_sum(y_pre, y_tru))
T1 - Understanding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# %run ../Python_files/util.py

link_label_dict = zload('../temp_files/link_label_dict_MA_journal_network.pkz')

link_label_dict['76']

# +
import json

# Node-link incidence matrix for the MA journal network (74 nodes x 258 links):
# N[i, j] = +1 if node i+1 is the tail of link j, -1 if it is the head, 0 otherwise.
# Link labels look like "tail->head" with 1-based node numbers.
N = np.zeros((74, 258))
N_dict = {}
for j in range(N.shape[1]):
    # PERF: hoisted out of the inner loop — the original re-split the label
    # string twice for every one of the 74 nodes per link.
    tail, head = link_label_dict[str(j)].split('->')
    for i in range(N.shape[0]):
        node = str(i + 1)
        if node == tail:
            N[i, j] = 1
        elif node == head:
            N[i, j] = -1
        # The flat dict mirrors N entry-by-entry (including zeros) so it can be
        # serialised to JSON.
        key = str(i) + '-' + str(j)
        N_dict[key] = N[i, j]

with open('../temp_files/node_link_incidence_MA_journal.json', 'w') as json_file:
    json.dump(N_dict, json_file)

zdump(N, '../temp_files/node_link_incidence_MA_journal.pkz')
# -

N

N_dict
09_2_develop_new_OD_demand_estimator_MA_journal_Dijkstra_uni_class/02_create_node_link_incidence_MA_journal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="0z0WVe5LetML" # ### Installation # + id="zuk6OHNSeve3" pip install -q tensorflow tensorflow-datasets # - # !pip install tensorflow_datasets # + [markdown] id="6cikfadSew47" # #### Imports # + id="U9grIZb9eyrT" import tensorflow as tf import matplotlib.pyplot as plt import numpy as np from tensorflow import keras import tensorflow_datasets as tfds # + [markdown] id="ZxeO5C5pez4W" # ### Checking datasets # + colab={"base_uri": "https://localhost:8080/"} id="p_h1KAxde25T" outputId="dd791cfc-0ddd-4dcb-d72e-15a4478d39e0" print(tfds.list_builders()) # + [markdown] id="NSHwNJ5ie3ea" # ### Getting data Infomation # + colab={"base_uri": "https://localhost:8080/"} id="YfHKJNp6e7qZ" outputId="7391cff5-f452-423d-83f0-d5443f965279" builder = tfds.builder('rock_paper_scissors') info = builder.info print(info) # + [markdown] id="VUxQNMJpe8J0" # ### Data Preparation # + id="LfI1BqNee_GT" train = tfds.load(name='rock_paper_scissors', split="train") test = tfds.load(name='rock_paper_scissors', split='test') # + [markdown] id="oo9cYBiUhgzL" # ### Iterating over data # > To iterate over a tensorflow dataset we do it as follows # + colab={"base_uri": "https://localhost:8080/"} id="isR5nRe3gtp3" outputId="c5d368c2-5806-4101-df63-17eafac7092c" for data in train: print(data['image'], data['label']) break # + [markdown] id="oO_KP18lh1Rk" # ### Creating a Numpy data # > We are going to scale our data and convert it to a nummpy array # + id="1Sui-N2hhE-8" train_images = np.array([data['image'].numpy()/255 for data in train]) train_labels =np.array([data['label'].numpy() for data in train]) test_image = np.array([data['image'].numpy()/255 for data in test]) test_labels = np.array([data['label'].numpy() for data in test]) # + colab={"base_uri": 
"https://localhost:8080/"} id="coydWR5FjMoX" outputId="0b884de2-31ea-4912-801d-2d576b8b518e" train_images[0], train_images.shape # + [markdown] id="YjUpcC6vjo6_" # ### Class Names # 0 - Rock # # 1 - Paper # # 2 - Scissors # + id="wfSaxWgThFF2" class_names = np.array(["rock", "paper", "scissor"]) # + [markdown] id="cjdKvufNfBi2" # ### Creating a NN # + colab={"base_uri": "https://localhost:8080/"} id="_S6hEaT-fEkF" outputId="93913bad-203f-4001-bc60-a452de8d55c1" input_shape = train_images[0].shape input_shape # + colab={"base_uri": "https://localhost:8080/"} id="Gj5gZ5i2kJFi" outputId="e940012e-f0ca-45e5-e0f2-399e77e922cb" model = keras.Sequential([ keras.layers.Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'), keras.layers.MaxPool2D((3,3)) , keras.layers.Conv2D(64, (2, 2), activation='relu'), keras.layers.MaxPool2D((2,2)), keras.layers.Conv2D(64, (2, 2), activation='relu'), keras.layers.MaxPool2D((2,2)), keras.layers.Flatten(), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(3, activation='softmax') ]) model.summary() # + [markdown] id="LvEJSTvVlXJU" # ### Combiling the Model # + id="RYYk9qHalRKQ" model.compile( optimizer = keras.optimizers.Adam(learning_rate=.0001), metrics=["accuracy"], loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) ) # + [markdown] id="Kj-Xb2F6mkO2" # ### Fitting the ModeL # + colab={"base_uri": "https://localhost:8080/"} id="G61NVC4hlWAl" outputId="2f89b1f4-53a7-4957-a2cd-ad82663470ae" EPOCHS = 5 BATCH_SIZE = 4 VALIDATION_SET = (test_image, test_labels) history = model.fit(train_images, train_labels, epochs=EPOCHS, validation_data=VALIDATION_SET, batch_size=BATCH_SIZE) # + [markdown] id="zM2IVzrGyRT2" # ### Model Evaluation Conclusion # Our model is performing perfect. The loss on the train_set is almost 0 as well as the validation loss. The accuracy on the train set is `100%` compared to `83%` accuracy on the test set. 
# # > The model is just overtraining but giving us good results on the validation set. # + [markdown] id="yCR7k8Aoy6h7" # ### Making Predictions # # + colab={"base_uri": "https://localhost:8080/"} id="_zDtH2MHlWEC" outputId="b74c96b8-98a5-4790-c7f1-ff9c12e159c9" predictions = model.predict(test_image[:10]) for i, j in zip(predictions, test_labels[:10]): print(class_names[np.argmax(i)],"-------->", class_names[j]) # + id="b83QOHCV0IVn" # + [markdown] id="TWq4DlDc0CLs" # ### Tunning Hyper Parameters -- Keras-Tunner # * [Docs](https://www.tensorflow.org/tutorials/keras/keras_tuner) # # + [markdown] id="cl6I5Mt70V82" # ### Installation # + colab={"base_uri": "https://localhost:8080/"} id="EkJ24NeVlWKP" outputId="e17aeb9d-24f0-46a8-ef1b-5629d6359796" pip install -q -U keras-tuner # + [markdown] id="pZko7sZ_0oHz" # ### Importing # + id="5OPoGe010jq4" import kerastuner as kt # + id="o6H7Kfxi0qTJ" def model_builder(hp): model = keras.Sequential() # we want the model to find the best unit and the activation function for the first layer for us model.add(keras.layers.Conv2D(hp.Int('units', min_value=32, max_value=512, step=32),(3, 3), input_shape=input_shape, activation=hp.Choice('activation-fn',values=['relu', 'sgd']))) model.add(keras.layers.MaxPool2D((3,3))) model.add(keras.layers.Conv2D(64, (2, 2), activation='relu')) model.add(keras.layers.MaxPool2D((2,2))) model.add(keras.layers.Conv2D(64, (2, 2), activation='relu')) model.add(keras.layers.MaxPool2D((2,2))) model.add(keras.layers.Flatten()) model.add(keras.layers.Dense(64, activation='relu')) model.add(keras.layers.Dense(32, activation='relu')) model.add(keras.layers.Dense(3, activation='softmax')) model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) return model # + colab={"base_uri": "https://localhost:8080/"} id="JA-izSwo2e_a" 
outputId="55aa4da7-bf0b-49e8-b0d3-442d3a27f9ce" tuner = kt.Hyperband(model_builder, objective='val_accuracy', max_epochs=10, ) # + colab={"base_uri": "https://localhost:8080/"} id="4RDa3RWm3LzC" outputId="a3c6deb7-9ae0-4d1b-f9fd-1ee9f3761e9f" tuner.search(train_images, train_labels, validation_data=VALIDATION_SET, epochs=EPOCHS, batch_size=BATCH_SIZE) # + [markdown] id="GlEHL9V55nVZ" # > That's basically how the `kerastunner` works # + id="8X1FkBXi35Bn"
keras-nn/09_Conv_NN_TensorFlowDataSets/TensorFlowDataSets_RPS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.integrate as integrate
from numba import jit
## from util import tools
plt.rcParams['figure.figsize'] = (16, 4.5)
plt.style.use('seaborn-whitegrid')

path = './util/datastore/yield_italy_daily_2010_2020.xlsx'
data = tools.load_BTP_curve(path)
# (removed a `data = data` no-op that was left over from an earlier edit)

# Daily rate changes; first row of diff() is NaN, so drop it.
diff_r = data.diff()[1:]
tenors = np.array(data.columns.values)

data.columns.values

sigma = diff_r.cov()
sigma *= 252  # annualized
print("Sigma shape : " + str(sigma.shape))

# ### Volatility Fitting
# The eigenvectors of the Covariance matrix are actually the directions of the axes where there is the most variance(most information) and that we call Principal Components. And eigenvalues are simply the coefficients attached to eigenvectors, which give the amount of variance carried in each Principal Component.
#
# #### PCA and Eigendecomposition
# <ul>
# <li>Obtain the Eigenvectors and Eigenvalues from the covariance matrix $\Sigma^{d\times d}$ </li>
# <li>Sort eigenvalues in descending order and choose the $k$ eigenvectors that correspond to the k largest eigenvalues where $k$ is the number of dimensions of the new feature subspace $(k≤d)$.</li>
# <li>Construct the projection matrix $\pmb{W}^{dxk}$ from the selected $k$ eigenvectors.</li>
# <li> Transform the original dataset $\pmb{X}$ via $\pmb{W}$ to obtain a k-dimensional feature subspace Y.
#
# </ul>

eigval, eigvec = np.linalg.eig(sigma)
factors = 3

# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eigval[i]), eigvec[:, i]) for i in range(len(eigval))]
eig_pairs.sort(key=lambda x: x[0], reverse=True)

print('Eigenvalues in descending order:')
for i in eig_pairs[:5]:
    print(i[0])
print("Highest Eigenvalues:")
print(eigval[:3])

tot = sum(eigval)  # the sum of the eigenvalues is the total variance
var_exp = [(i / tot)*100 for i in sorted(eigval, reverse=True)]
cum_var_explained = np.cumsum(var_exp)
print("Variance% explained by the first 3 Eigenvectors:")
print(cum_var_explained[3])

[plt.bar(i + 1, var_exp[i], alpha=0.5, label=("Component " + str(i + 1))) for i in range(0, 5)]
[plt.step(i+1, cum_var_explained[i]) for i in range(0, 5)]
plt.title("Explained variance by components")
plt.legend()
plt.show()

# Construction of the projection matrix that will be used to transform the original data onto the new feature subspace. Reducing the d-dimensional tenor space to a k-dimensional tenor subspace, by choosing the top $k$ eigenvectors with the highest eigenvalues to construct our $d×k$ dimensional eigenvector matrix $\pmb{W}^{d \times k}$.
# $$\pmb{Y}^{n \times k} = \pmb{X}^{n \times d} \times \pmb{W}^{d \times k}$$
# Where $d$ is the BTP maturity, $n$ is the observation date and $k$ the number of principal components

nfactors = 3
princ_eigvector = [eig_pairs[i][1] for i in range(0, nfactors)]
princ_eigvalues = [eig_pairs[i][0] for i in range(0, nfactors)]
W_matrix = np.vstack(princ_eigvector).T

princ_eigvector

Y = data.dot(W_matrix)

plt.plot(W_matrix, marker='.'), plt.title('Principal Eigenvectors'), plt.xlabel(r'Time $t$');

# **Get volatility:** <br>
# What is the norm that was used to scale the eigenvector? It is the square root of the sum of squares of the coefficicents in the vector, i.e. the square root of the variance. The eigenvalue is the square of this value, i.e. it is the sum of squares = total variance.<br>
# https://stats.stackexchange.com/questions/346692/how-does-eigenvalues-measure-variance-along-the-principal-components-in-pca#:~:text=The%20eigenvalue%20is%20the%20square,sum%20of%20squares%20%3D%20total%20variance.&text=Then%20the%20scores%2C%20since%20they,data%20by%20each%20unit%20vector.

vol_compressed = np.sqrt(princ_eigvalues)*W_matrix

plt.plot(vol_compressed, marker='.'), plt.xlabel("Time"), plt.ylabel('Volatility')
plt.title("Discretized volatility $\sigma$")
plt.show()


# **Volatility fitting** <br>

def interp_volatility(vols, tenors):
    """
    Fit a polynomial through each principal-component volatility curve.

    Parameters
    -------
    vols: volatility matrix to be interpolated (nObs x nFactors)
    tenors: array of tenors used for fitting

    Returns
    -------
    x: Volatility fitted polynomials
    save_pmts: poly weigths
    """
    x = np.zeros((vols.shape[0], vols.shape[1]))
    degree = 2
    save_pmts = []
    for i in range(0, vols.shape[1]):
        vol = np.array(vols[:, i].flatten())
        fit_vol = np.polyfit(x=tenors, y=vol, deg=degree)
        x[:, i] = np.polyval(fit_vol, tenors)
        # NOTE(review): the first component is fitted with degree 2, all later
        # components with degree 4 — this asymmetry looks intentional (PC1 is
        # near-flat) but is worth confirming.
        degree = 4
        save_pmts.append(fit_vol)
    return x, save_pmts


fitted_vol, rg = interp_volatility(vol_compressed, tenors)

plt.subplot(1, 3, 1), plt.plot(tenors, fitted_vol[:, 0]), plt.plot(tenors, vol_compressed[:, 0])
plt.legend(["Fitted Vol", "PC1 Vol"])
plt.subplot(1, 3, 2), plt.plot(tenors, fitted_vol[:, 1]), plt.plot(tenors, vol_compressed[:, 1])
plt.legend(["Fitted Vol", "PC2 Vol"])
plt.subplot(1, 3, 3), plt.plot(tenors, fitted_vol[:, 2]), plt.plot(tenors, vol_compressed[:, 2])
plt.legend(["Fitted Vol", "PC3 Vol"])
plt.show()

# numerical-integration sanity check on the first fitted polynomial
test = np.poly1d(rg[0])  # polynomial of the first component
prova = integrate.quad(test, a=1, b=10)[0]
prova

# ### HJM framework (<NAME> p.232 pdf)
# $$df(t, T) = \alpha dt + \sigma(t, T) dW (t) $$
# $$ f(0,T) = f^M(0,T)$$
# ### Gaussian HJM
# Under the risk neutral measure $\mathbb{Q}$ the instantaneous forward rate process is:
# $$df(t, T) = \sigma_f(t, T)^\top \sigma_P(t, T)dt + \sigma_f (t, T)^\top dW (t) $$
# that can be rewritten as (pg 495 pdf Andersen-Piterbarg):
# $$df(t, T) = \sigma_f(t, T)^\top \int_t^T\sigma_f(t, u)du dt + \sigma_f (t, T)^\top dW (t) $$
# with $\sigma_f$ being a d-dimensional vector. <br>
# Then we have specified the entire forward rate structure
# $$P(t,T) = exp\bigg\{ - \int_t^T f(t,s) ds \bigg\}
# $$
# #### Risk Neutral drift
# Now use the fitted polynomial to define $\sigma_f$, the drift $\alpha(t,\tau)$ is calculated using numerical integration over that function <br><br>
# $$ \alpha(t,T) = \sigma(t, T)\int_t^T\sigma(t, u)du =\sum_{i=1}^n \sigma_i(t,T) \int_t^T \sigma_i(t,u)du $$

# +
vol_functions = [np.poly1d(coeff) for coeff in rg]


# t = 0
def mu(T, vol_functions):
    """HJM risk-neutral drift at maturity T: sum_i sigma_i(T) * int_0^T sigma_i(u) du."""
    mean = 0
    for sigma in vol_functions:
        comp_mean = integrate.quad(sigma, 0, T)[0] * sigma(T)
        mean += comp_mean
    return mean
# -

simulation_tenors = np.linspace(0, 30, 12)
simulation_drift = np.array([mu(T, vol_functions) for T in simulation_tenors])
simulation_vols = fitted_vol.T

plt.subplot(1, 2, 1), plt.plot(simulation_tenors, simulation_drift), plt.xlabel("Time")
plt.title("Risk Neutral Drift")
plt.subplot(1, 2, 2), plt.plot(simulation_tenors, simulation_vols.T), plt.xlabel("Time")
plt.title("Volatility")
plt.show()

# ### Simulation (single path)
# $$f(t, T) = f(0, T) + \sum_{i=1}^n \sigma_i(t,T) \int_t^T \sigma_i(t,u)du + \sum_{i=1}^n \sigma_i(t,T) dW (t)
# $$

spot_BTPcurve = data[-1::].values.flatten()
time_grid = np.linspace(0, 5, 500)
np.random.seed(12)

# +
#@jit(nopython=True)
#def mc_path(f0, simulation_end, timesteps, simulation_tenors):
#    ... (earlier numba draft kept for reference; superseded by the pure-Python
#    version below, which draws one normal per factor per tenor)
# -

# +
#def fwd_alongMaturities(f0, simulation_tenors, simulation_vols, dt):
#    ... (earlier helper draft, unused)
# -

def mc_path(f0, simulation_end, timesteps, simulation_tenors):
    """Simulate one Euler path of the HJM forward curve.

    Returns (shortest-tenor path, full curve history of shape
    (len(simulation_tenors), timesteps)).
    """
    time_grid = np.linspace(0, simulation_end, timesteps)
    f = np.zeros(len(simulation_tenors))
    f_hjm = np.zeros((len(simulation_tenors), len(time_grid)))
    f_hjm[:, 0] = f0
    lenvols = len(simulation_vols)
    for i in range(1, len(time_grid)):
        t = time_grid[i]
        dt = t - time_grid[i-1]
        # for each time step build a whole forward-rate curve
        for T in range(len(simulation_tenors)):
            # with 3 volatility factors we also need 3 uncorrelated BMs
            sigma = 0
            for nVol, vol in enumerate(simulation_vols):
                sigma += vol[T]*np.random.normal()
            f[T] = f0[T] + simulation_drift[T]*dt + sigma*np.sqrt(dt)
        f0 = f
        f_hjm[:, i] = f
    return f_hjm[0, :], f_hjm


# +
@jit(nopython=True)
def trapz_integral(f, t, T):
    """Rectangle-rule approximation of int_t^T f(s) ds over the sampled path."""
    assert T >= t
    summ = f.sum()
    N = len(f)
    integral = ((T-t)/N)*summ
    return integral


@jit(nopython=True)
def zcb_price(f, t, T):
    """Zero-coupon bond price P(t,T) = exp(-int_t^T f ds)."""
    assert T >= t
    exponent = trapz_integral(f, t, T)
    PtT = np.exp(-exponent)
    return PtT


@jit(nopython=True)
def get_discount_curve(simulation_end, timesteps, fwd_rates, t, T):
    """Discount factor P(t,T) from a simulated instantaneous-forward path."""
    time_grid = np.linspace(0, simulation_end, timesteps)
    assert T >= t
    assert time_grid.shape == fwd_rates.shape
    # BUG FIX: the original recomputed `last_time` in a loop over every
    # timestep and only the final iteration's value survived; computing it
    # once gives the identical result without the dead loop.
    last_time = len(time_grid[time_grid <= T])
    forward_to_integrate = fwd_rates[:last_time]
    return zcb_price(forward_to_integrate, t, T)


def mc_simulation(Nsimul, seed, f0, simulation_end, timesteps, simulation_tenors):
    """Run Nsimul HJM paths; return the short-rate paths and the ZCB curves."""
    np.random.seed(seed)
    fwd_simul = np.zeros((timesteps, Nsimul))
    zcb_curve = np.zeros((timesteps, Nsimul))
    for n in range(Nsimul):
        # BUG FIX: the horizon was hard-coded to 5 here, silently ignoring the
        # `simulation_end` parameter.
        fwd_simul[:, n] = mc_path(f0, simulation_end, timesteps, simulation_tenors)[0]
        zcb_curve[:, n] = np.array(
            [get_discount_curve(
                simulation_end,
                timesteps,
                fwd_simul[:, n],
                0,
                taus) for taus in np.linspace(0, simulation_end, timesteps)])
    return fwd_simul, zcb_curve
# -

seed = 123
fwd_simul, zcb_curve = mc_simulation(50, seed, spot_BTPcurve, 5, 500, simulation_tenors)

plt.plot(fwd_simul)
plt.show()
plt.plot(zcb_curve);

# $$P(t,T) = exp\bigg\{ - \int_t^T f(t,s) ds \bigg\}
# $$
# Numerically integrate $\int_t^T f(t,s) ds $ (rectangle rule):
# $$
# \int_{t}^{T} f(t, s)ds \approx \frac{(T - t)}{Nsample}\sum_{i=1}^n f(t, T_i)
# $$

plt.plot(fwd_simul.mean(axis=1))

# +
## zcb price test
# integrate the instantaneous forward at t0 along the whole time grid
# -

# BUG FIX: this cell referenced `f_hjm`, which is only a local variable inside
# mc_path (NameError), and called get_discount_curve with the wrong arguments
# (it takes simulation_end, timesteps, fwd_rates, t, T). Use the first
# simulated path instead.
inst_forward_path = fwd_simul[:, 0]

a = np.array(
    [get_discount_curve(5, 500, inst_forward_path, 0, taus) for taus in np.linspace(0, 5, 100)])

a.shape

plt.plot(a);
BTP_vol_study.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (Data Science)
#     language: python
#     name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---

# # Deploy and monitor a machine learning workflow for Image Classification

# ## Setting up this notebook
#
# Notes about the instance size and kernel setup: this notebook has been tested on
#
# 1. The `Python 3 (Data Science)` kernel
# 2. The `ml.t3.medium` instance
#
# ## Data Staging
#
# We'll use a sample dataset called CIFAR to simulate the challenges Scones Unlimited are facing in Image Classification. In order to start working with CIFAR we'll need to:
#
# 1. Extract the data from a hosting service
# 2. Transform it into a usable shape and format
# 3. Load it into a production system
#
# In other words, we're going to do some simple ETL!
#
# ### 1. Extract the data from the hosting service
#
# In the cell below, define a function `extract_cifar_data` that extracts python version of the CIFAR-100 dataset. The CIFAR dataaset is open source and generously hosted by the University of Toronto at: https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz

# +
import requests


def extract_cifar_data(url, filename="cifar.tar.gz"):
    """A function for extracting the CIFAR-100 dataset and storing it as a gzipped file

    Arguments:
    url -- the URL where the dataset is hosted
    filename -- the full path where the dataset will be written
    """
    r = requests.get(url=url)
    # ROBUSTNESS FIX: fail loudly on an HTTP error instead of silently saving
    # an error page as the archive.
    r.raise_for_status()
    with open(filename, "wb") as file_context:
        file_context.write(r.content)
    return


# -

# Let's test it out! Run the following cell and check whether a new file `cifar.tar.gz` is created in the file explorer.

extract_cifar_data("https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz")

# ### 2. Transform the data into a usable shape and format
#
# Clearly, distributing the data as a gzipped archive makes sense for the hosting service! It saves on bandwidth, storage, and it's a widely-used archive format. In fact, it's so widely used that the Python community ships a utility for working with them, `tarfile`, as part of its Standard Library. Execute the following cell to decompress your extracted dataset:

# +
import tarfile

# NOTE(security): extractall() on a downloaded archive is vulnerable to path
# traversal (CVE-2007-4559) if the source were untrusted; this archive comes
# from the official CIFAR host. On Python 3.12+ pass filter="data".
with tarfile.open("cifar.tar.gz", "r:gz") as tar:
    tar.extractall()
# -

# A new folder `cifar-100-python` should be created, containing `meta`, `test`, and `train` files. These files are `pickles` and the [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html) provides a simple script that can be used to load them. We've adapted the script below for you to run:

# +
import pickle

with open("./cifar-100-python/meta", "rb") as f:
    dataset_meta = pickle.load(f, encoding='bytes')
with open("./cifar-100-python/test", "rb") as f:
    dataset_test = pickle.load(f, encoding='bytes')
with open("./cifar-100-python/train", "rb") as f:
    dataset_train = pickle.load(f, encoding='bytes')

# +
# Feel free to explore the datasets
dataset_train.keys()
# -

# As documented on the homepage, `b'data'` contains rows of 3073 unsigned integers, representing three channels (red, green, and blue) for one 32x32 pixel image per row.

32*32*3

# For a simple gut-check, let's transform one of our images. Each 1024 items in a row is a channel (red, green, then blue). Each 32 items in the channel are a row in the 32x32 image. Using python, we can stack these channels into a 32x32x3 array, and save it as a PNG file:

# +
import numpy as np

# Each 1024 in a row is a channel (red, green, then blue)
row = dataset_train[b'data'][0]
red, green, blue = row[0:1024], row[1024:2048], row[2048:]

# Each 32 items in the channel are a row in the 32x32 image
red = red.reshape(32, 32)
green = green.reshape(32, 32)
blue = blue.reshape(32, 32)

# Combine the channels into a 32x32x3 image!
combined = np.dstack((red, green, blue))
# -

# For a more concise version, consider the following:

# All in one:
test_image = np.dstack((
    row[0:1024].reshape(32, 32),
    row[1024:2048].reshape(32, 32),
    row[2048:].reshape(32, 32)
))

import matplotlib.pyplot as plt
plt.imshow(test_image);

# Looks like a cow! Let's check the label. `dataset_meta` contains label names in order, and `dataset_train` has a list of labels for each row.

dataset_train[b'fine_labels'][0]

# Our image has a label of `19`, so let's see what the 19th item is in the list of label names.

print(dataset_meta[b'fine_label_names'][19])

# Ok! 'cattle' sounds about right. By the way, using the previous two lines we can do:

n = 0
print(dataset_meta[b'fine_label_names'][dataset_train[b'fine_labels'][n]])

# Now we know how to check labels, is there a way that we can also check file names? `dataset_train` also contains a `b'filenames'` key. Let's see what we have here:

print(dataset_train[b'filenames'][0])

# "Taurus" is the name of a subspecies of cattle, so this looks like a pretty reasonable filename. To save an image we can also do:

plt.imsave("file.png", test_image)

# Your new PNG file should now appear in the file explorer -- go ahead and pop it open to see!
#
# Now that you know how to reshape the images, save them as files, and capture their filenames and labels, let's just capture all the bicycles and motorcycles and save them. Scones Unlimited can use a model that tells these apart to route delivery drivers automatically.
#
# In the following cell, identify the label numbers for Bicycles and Motorcycles:

# +
import pandas as pd

# Find the label numbers for bicycle and motorcycle in the fine label list.
bicycle_label_no = dataset_meta[b'fine_label_names'].index(b'bicycle')
motorcycle_label_no = dataset_meta[b'fine_label_names'].index(b'motorcycle')
print(bicycle_label_no)
print(motorcycle_label_no)
# -

# Good job! We only need objects with label 8 and 48 -- this drastically simplifies our handling of the data! Below we construct a dataframe for you, and you can safely drop the rows that don't contain observations about bicycles and motorcycles. Fill in the missing lines below to drop all other rows:

# +
# Construct the dataframe
df_train = pd.DataFrame({
    "filenames": dataset_train[b'filenames'],
    "labels": dataset_train[b'fine_labels'],
    "row": range(len(dataset_train[b'filenames']))
})

# Drop all rows from df_train where label is not 8 or 48
df_train = df_train[(df_train["labels"] == 8) | (df_train["labels"] == 48)]

# Decode df_train.filenames so they are regular strings
df_train["filenames"] = df_train["filenames"].apply(
    lambda x: x.decode("utf-8")
)

df_test = pd.DataFrame({
    "filenames": dataset_test[b'filenames'],
    "labels": dataset_test[b'fine_labels'],
    "row": range(len(dataset_test[b'filenames']))
})

# Drop all rows from df_test where label is not 8 or 48
df_test = df_test[(df_test["labels"] == 8) | (df_test["labels"] == 48)]

# Decode df_test.filenames so they are regular strings
df_test["filenames"] = df_test["filenames"].apply(
    lambda x: x.decode("utf-8")
)
# -

df_train["labels"].unique()

df_test["labels"].unique()

df_train.head()

for i in df_train["row"]:
    print(i)
    if (i > 50):
        break

len(df_train)

len(df_test)

# Now that the data is filtered for just our classes, we can save all our images.

# !mkdir ./train
# !mkdir ./test

# Testing my logic for getting images
for i in df_train[:10]["row"]:
    print(df_train["filenames"][i])
    print(dataset_train[b'data'][i])
    row = dataset_train[b'data'][i]
    formatted_data = np.dstack((
        row[0:1024].reshape(32, 32),
        row[1024:2048].reshape(32, 32),
        row[2048:].reshape(32, 32)
    ))
    # print(formatted_data)
    print(dataset_train[b'filenames'][i])

# In the previous sections we introduced you to several key snippets of code:
#
# 1. Grabbing the image data:
#
# ```python
# dataset_train[b'data'][0]
# ```
#
# 2. A simple idiom for stacking the image data into the right shape
#
# ```python
# import numpy as np
# np.dstack((
#     row[0:1024].reshape(32,32),
#     row[1024:2048].reshape(32,32),
#     row[2048:].reshape(32,32)
# ))
# ```
#
# 3. A simple `matplotlib` utility for saving images
#
# ```python
# plt.imsave(path+row['filenames'], target)
# ```
#
# Compose these together into a function that saves all the images into the `./test` and `./train` directories. Use the comments in the body of the `save_images` function below to guide your construction of the function:
#

# +
def save_images(df, df2, path):
    """Write every image referenced by `df` to `path` as a PNG.

    Arguments:
    df   -- filtered dataframe with 'row' (index into the raw data) and 'filenames'
    df2  -- raw CIFAR dict whose b'data' holds the row-major pixel rows
    path -- destination directory, including trailing slash
    """
    for i in df["row"]:
        # Grab the image data in row-major form
        img = df2[b'data'][i]

        # Consolidated stacking/reshaping from earlier
        target = np.dstack((
            img[0:1024].reshape(32, 32),
            img[1024:2048].reshape(32, 32),
            img[2048:].reshape(32, 32)
        ))

        # Save the image
        plt.imsave(path + df['filenames'][i], target)

    # Return any signal data you want for debugging
    return True


# -

## Save ALL images using the save_images function
save_images(df_train, dataset_train, './train/')
save_images(df_test, dataset_test, './test/')

# ### 3. Load the data
#
# Now we can load the data into S3.
#
# Using the sagemaker SDK grab the current region, execution role, and bucket.

# +
import sagemaker
from sagemaker import get_execution_role

session = sagemaker.session.Session()
bucket = session.default_bucket()
print("Default Bucket: {}".format(bucket))

region = session.boto_region_name
print("AWS Region: {}".format(region))

role = get_execution_role()
print("RoleArn: {}".format(role))
# -

# With this data we can easily sync your data up into S3!

# +
import os

os.environ["DEFAULT_S3_BUCKET"] = bucket
# !aws s3 sync ./train s3://${DEFAULT_S3_BUCKET}/train/
# !aws s3 sync ./test s3://${DEFAULT_S3_BUCKET}/test/
# -

# And that's it! You can check the bucket and verify that the items were uploaded.
#
# ## Model Training
#
# For Image Classification, Sagemaker [also expects metadata](https://docs.aws.amazon.com/sagemaker/latest/dg/image-classification.html) e.g. in the form of TSV files with labels and filepaths. We can generate these using our Pandas DataFrames from earlier:

# +
def to_metadata_file(df, prefix):
    """Write a SageMaker .lst metadata file (row \t label \t s3_path).

    Labels are remapped to the binary task: 8 (bicycle) -> 0, 48 (motorcycle) -> 1.
    """
    df["s3_path"] = df["filenames"]
    df["labels"] = df["labels"].apply(lambda x: 0 if x == 8 else 1)
    return df[["row", "labels", "s3_path"]].to_csv(
        f"{prefix}.lst", sep="\t", index=False, header=False
    )


to_metadata_file(df_train.copy(), "train")
to_metadata_file(df_test.copy(), "test")
# -

# We can also upload our manifest files:

# +
import boto3

# Upload files
boto3.Session().resource('s3').Bucket(
    bucket).Object('train.lst').upload_file('./train.lst')
boto3.Session().resource('s3').Bucket(
    bucket).Object('test.lst').upload_file('./test.lst')
# -

# Using the `bucket` and `region` info we can get the latest prebuilt container to run our training job, and define an output location on our s3 bucket for the model. Use the `image_uris` function from the SageMaker SDK to retrieve the latest `image-classification` image below:

# Use the image_uris function to retrieve the latest 'image-classification' image
from sagemaker import image_uris
algo_image = image_uris.retrieve('image-classification', session.boto_region_name)
s3_output_location = f"s3://{bucket}/models/image_model"

# We're ready to create an estimator! Create an estimator `img_classifier_model` that uses one instance of `ml.p2.xlarge`. Ensure that you use the output location we defined above - we'll be referring to that later!

img_classifier_model = sagemaker.estimator.Estimator(
    algo_image,
    role,
    instance_count=1,
    instance_type='ml.p2.xlarge',  # What kind of compute instances
    output_path=s3_output_location,
    sagemaker_session=session
)

# We can also set a few key hyperparameters and define the inputs for our model:

img_classifier_model.set_hyperparameters(
    image_shape='3,32,32',
    num_classes=2,
    num_training_samples=1000,
    epochs=25
)

# The `image-classification` image uses four input channels with very specific input parameters. For convenience, we've provided them below:

from sagemaker.debugger import Rule, rule_configs
from sagemaker.session import TrainingInput
model_inputs = {
    "train": sagemaker.inputs.TrainingInput(
        s3_data=f"s3://{bucket}/train/",
        content_type="application/x-image"
    ),
    "validation": sagemaker.inputs.TrainingInput(
        s3_data=f"s3://{bucket}/test/",
        content_type="application/x-image"
    ),
    "train_lst": sagemaker.inputs.TrainingInput(
        s3_data=f"s3://{bucket}/train.lst",
        content_type="application/x-image"
    ),
    "validation_lst": sagemaker.inputs.TrainingInput(
        s3_data=f"s3://{bucket}/test.lst",
        content_type="application/x-image"
    )
}

# Great, now we can train the model using the model_inputs. In the cell below, call the `fit` method on our model,:

## Train the model on the four input channels defined above
img_classifier_model.fit(model_inputs)

# If all goes well, you'll end up with a model topping out above `.8` validation accuracy. With only 1000 training samples in the CIFAR dataset, that's pretty good. We could definitely pursue data augmentation & gathering more samples to help us improve further, but for now let's proceed to deploy our model.
#
# ### Getting ready to deploy
#
# To begin with, let's configure Model Monitor to track our deployment.
We'll define a `DataCaptureConfig` below: # + from sagemaker.model_monitor import DataCaptureConfig data_capture_config = DataCaptureConfig( ## TODO: Set config options enable_capture=True, sampling_percentage=100, destination_s3_uri=f"s3://{bucket}/data_capture" ) # - # Note the `destination_s3_uri` parameter: At the end of the project, we can explore the `data_capture` directory in S3 to find crucial data about the inputs and outputs Model Monitor has observed on our model endpoint over time. # # With that done, deploy your model on a single `ml.m5.xlarge` instance with the data capture config attached: # + deployment = img_classifier_model.deploy( ## TODO: fill in deployment options initial_instance_count=1, instance_type='ml.m5.xlarge', data_capture_config=data_capture_config ) endpoint = deployment.endpoint_name print(endpoint) # - # Note the endpoint name for later as well. # # Next, instantiate a Predictor: predictor = sagemaker.predictor.Predictor(endpoint) # In the code snippet below we are going to prepare one of your saved images for prediction. Use the predictor to process the `payload`. # + from sagemaker.serializers import IdentitySerializer import base64 predictor.serializer = IdentitySerializer("image/png") with open("./test/bicycle_s_001789.png", "rb") as f: payload = f.read() inference = predictor.predict(payload)## TODO: Process the payload with your predictor # - # Your `inference` object is an array of two values, the predicted probability value for each of your classes (bicycle and motorcycle respectively.) So, for example, a value of `b'[0.91, 0.09]'` indicates the probability of being a bike is 91% and being a motorcycle is 9%. print(inference) # ### Draft Lambdas and Step Function Workflow # # Your operations team uses Step Functions to orchestrate serverless workflows. 
One of the nice things about Step Functions is that [workflows can call other workflows](https://docs.aws.amazon.com/step-functions/latest/dg/connect-stepfunctions.html), so the team can easily plug your workflow into the broader production architecture for Scones Unlimited. # # In this next stage you're going to write and deploy three Lambda functions, and then use the Step Functions visual editor to chain them together! Our functions are going to work with a simple data object: # # ```python # { # "inferences": [], # Output of predictor.predict # "s3_key": "", # Source data S3 key # "s3_bucket": "", # Source data S3 bucket # "image_data": "" # base64 encoded string containing the image data # } # ``` # # A good test object that you can use for Lambda tests and Step Function executions, throughout the next section, might look like this: # # ```python # { # "image_data": "", # "s3_bucket": MY_BUCKET_NAME, # Fill in with your bucket # "s3_key": "test/bicycle_s_000513.png" # } # ``` # # Using these fields, your functions can read and write the necessary data to execute your workflow. Let's start with the first function. Your first Lambda function will copy an object from S3, base64 encode it, and then return it to the step function as `image_data` in an event. # # Go to the Lambda dashboard and create a new Lambda function with a descriptive name like "serializeImageData" and select thr 'Python 3.8' runtime. Add the same permissions as the SageMaker role you created earlier. (Reminder: you do this in the Configuration tab under "Permissions"). 
Once you're ready, use the starter code below to craft your Lambda handler: # # ```python # import json # import boto3 # import base64 # # s3 = boto3.client('s3') # # def lambda_handler(event, context): # """A function to serialize target data from S3""" # # # Get the s3 address from the Step Function event input # key = ## TODO: fill in # bucket = ## TODO: fill in # # # Download the data from s3 to /tmp/image.png # ## TODO: fill in # # # We read the data from a file # with open("/tmp/image.png", "rb") as f: # image_data = base64.b64encode(f.read()) # # # Pass the data back to the Step Function # print("Event:", event.keys()) # return { # 'statusCode': 200, # 'body': { # "image_data": image_data, # "s3_bucket": bucket, # "s3_key": key, # "inferences": [] # } # } # ``` # # The next function is responsible for the classification part - we're going to take the image output from the previous function, decode it, and then pass inferences back to the the Step Function. # # Because this Lambda will have runtime dependencies (i.e. the SageMaker SDK) you'll need to package them in your function. *Key reading:* https://docs.aws.amazon.com/lambda/latest/dg/python-package-create.html#python-package-create-with-dependency # # Create a new Lambda function with the same rights and a descriptive name, then fill in the starter code below for your classifier Lambda. 
# # ```python # import json # import sagemaker # import base64 # from sagemaker.serializers import IdentitySerializer # # # Fill this in with the name of your deployed model # ENDPOINT = ## TODO: fill in # # def lambda_handler(event, context): # # # Decode the image data # image = base64.b64decode(## TODO: fill in) # # # Instantiate a Predictor # predictor = ## TODO: fill in # # # For this model the IdentitySerializer needs to be "image/png" # predictor.serializer = IdentitySerializer("image/png") # # # Make a prediction: # inferences = ## TODO: fill in # # # We return the data back to the Step Function # event["inferences"] = inferences.decode('utf-8') # return { # 'statusCode': 200, # 'body': json.dumps(event) # } # ``` # # Finally, we need to filter low-confidence inferences. Define a threshold between 1.00 and 0.000 for your model: what is reasonble for you? If the model predicts at `.70` for it's highest confidence label, do we want to pass that inference along to downstream systems? Make one last Lambda function and tee up the same permissions: # # ```python # import json # # # THRESHOLD = .93 # # # def lambda_handler(event, context): # # # Grab the inferences from the event # inferences = ## TODO: fill in # # # Check if any values in our inferences are above THRESHOLD # meets_threshold = ## TODO: fill in # # # If our threshold is met, pass our data back out of the # # Step Function, else, end the Step Function with an error # if meets_threshold: # pass # else: # raise("THRESHOLD_CONFIDENCE_NOT_MET") # # return { # 'statusCode': 200, # 'body': json.dumps(event) # } # ``` # Once you have tested the lambda functions, save the code for each lambda function in a python script called 'lambda.py'. # # With your lambdas in place, you can use the Step Functions visual editor to construct a workflow that chains them together. In the Step Functions console you'll have the option to author a Standard step function *Visually*. 
# # When the visual editor opens, you'll have many options to add transitions in your workflow. We're going to keep it simple and have just one: to invoke Lambda functions. Add three of them chained together. For each one, you'll be able to select the Lambda functions you just created in the proper order, filter inputs and outputs, and give them descriptive names. # # Make sure that you: # # 1. Are properly filtering the inputs and outputs of your invokations (e.g. `$.body`) # 2. Take care to remove the error handling from the last function - it's supposed to "fail loudly" for your operations colleagues! # # Take a screenshot of your working step function in action and export the step function as JSON for your submission package. # # Great! Now you can use the files in `./test` as test files for our workflow. Depending on our threshold, our workflow should reliably pass predictions about images from `./test` on to downstream systems, while erroring out for inferences below our confidence threshold! # # ### Testing and Evaluation # # Do several step function invokations using data from the `./test` folder. This process should give you confidence that the workflow both *succeeds* AND *fails* as expected. In addition, SageMaker Model Monitor will generate recordings of your data and inferences which we can visualize. # # Here's a function that can help you generate test inputs for your invokations: # + import random import boto3 import json def generate_test_case(): # Setup s3 in boto3 s3 = boto3.resource('s3') # Randomly pick from sfn or test folders in our bucket objects = s3.Bucket(bucket).objects.filter(Prefix = "test") # Grab any random object key from that folder! obj = random.choice([x.key for x in objects]) print(obj) return json.dumps({ "image_data": "", "s3_bucket": bucket, "s3_key": obj }) generate_test_case() # - # In the Step Function dashboard for your new function, you can create new executions and copy in the generated test cases. 
Do several executions so that you can generate data you can evaluate and visualize. # # Once you've done several executions, let's visualize the record of our inferences. Pull in the JSONLines data from your inferences like so: # + from sagemaker.s3 import S3Downloader # In S3 your data will be saved to a datetime-aware path # Find a path related to a datetime you're interested in data_path = "s3://sagemaker-us-east-1-000355413178/data_capture/image-classification-2022-01-22-15-40-53-009/AllTraffic/2022/01/22/20/" ## TODO: fill in the path to your captured data S3Downloader.download(data_path, "captured_data") # Feel free to repeat this multiple times and pull in more data # - # The data are in JSONLines format, where multiple valid JSON objects are stacked on top of eachother in a single `jsonl` file. We'll import an open-source library, `jsonlines` that was purpose built for parsing this format. # !pip install jsonlines import jsonlines # Now we can extract the data from each of the source files: # + import os import json # List the file names we downloaded file_handles = os.listdir("./captured_data") # Dump all the data into an array json_data = [] for jsonl in file_handles: with jsonlines.open(f"./captured_data/{jsonl}") as f: json_data.append(f.read()) # - # The data should now be a list of dictionaries, with significant nesting. We'll give you an example of some code that grabs data out of the objects and visualizes it: # + # Define how we'll get our data def simple_getter(obj): inferences = obj["captureData"]["endpointOutput"]["data"] timestamp = obj["eventMetadata"]["inferenceTime"] return json.loads(inferences), timestamp simple_getter(json_data[5]) # - # Finally, here's an example of a visualization you can build with this data. In this last part, you will take some time and build your own - the captured data has the input images, the resulting inferences, and the timestamps. 
# + # Populate the data for the x and y axis x = [] y = [] for obj in json_data: inference, timestamp = simple_getter(obj) y.append(max(inference)) x.append(timestamp) # Todo: here is an visualization example, take some time to build another visual that helps monitor the result # Plot the data plt.scatter(x, y, c=['r' if k<.90 else 'b' for k in y ]) plt.axhline(y=0.90, color='g', linestyle='--') plt.ylim(bottom=.65) # Add labels plt.ylabel("Confidence") plt.suptitle("Observed Recent Inferences", size=14) plt.title("Pictured with confidence threshold for production use", size=10) # Give it some pizzaz! plt.style.use("Solarize_Light2") plt.gcf().autofmt_xdate() # + # Pie Chart for Classes Predicted by the Model # Threshold not taken into consideration import numpy as np cls_predicted = [] for obj in json_data: inference, timestamp = simple_getter(obj) cls = inference.index(max(inference)) cls_predicted.append(cls) # Bike is Class 0 # Motorcycle is Class 1 num_bike = cls_predicted.count(0) num_motorcycle = cls_predicted.count(1) y = np.array([num_bike, num_motorcycle]) mylabels = ["Bike", "Motorcycle"] plt.pie(y, labels = mylabels,autopct='%1.1f%%') plt.title("% of Classes Predicted by Model") plt.show() # - # ### Congratulations! # # You've reached the end of the project. In this project you created an event-drivent ML workflow that can be incorporated into the Scones Unlimited production architecture. You used the SageMaker Estimator API to deploy your SageMaker Model and Endpoint, and you used AWS Lambda and Step Functions to orchestrate your ML workflow. Using SageMaker Model Monitor, you instrumented and observed your Endpoint, and at the end of the project you built a visualization to help stakeholders understand the performance of the Endpoint over time. 
If you're up for it, you can even go further with these stretch goals:
#
# * Extend your workflow to incorporate more classes: the CIFAR dataset includes other vehicles that Scones Unlimited can identify with this model.
# * Modify your event-driven workflow: can you rewrite your Lambda functions so that the workflow can process multiple image inputs in parallel? Can the Step Function "fan out" to accommodate this new workflow?
# * Consider the test data generator we provided for you. Can we use it to create a "dummy data" generator, to simulate a continuous stream of input data? Or a big parallel load of data?
# * What if we want to get notified every time our step function errors out? Can we use the Step Functions visual editor in conjunction with a service like SNS to accomplish this? Try it out!
#
# #
SconesProjectNB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.062035, "end_time": "2021-09-19T11:55:26.005448", "exception": false, "start_time": "2021-09-19T11:55:25.943413", "status": "completed"} tags=[] cell_id="00000-d04fcdd5-5294-4d10-a8ef-dc04d7eab67d" deepnote_cell_type="markdown" # ![Digital-world-changing-the-world.jpg](./Images/image1.jpg) # # + [markdown] papermill={"duration": 0.065919, "end_time": "2021-09-19T11:55:26.137017", "exception": false, "start_time": "2021-09-19T11:55:26.071098", "status": "completed"} tags=[] cell_id="00001-74b1692a-30ee-4900-b27a-d5694f133c7c" deepnote_cell_type="markdown" # **Cover by Author | Elements by [Freepik](https://www.freepik.com/free-vector/students-using-e-learning-platform-video-laptop-graduation-cap-online-education-platform-e-learning-platform-online-teaching-concept_10782685.htm) & [Stories](https://www.freepik.com/free-vector/learning-concept-illustration_14230944.htm#page=1&query=education&position=4)** # + [markdown] papermill={"duration": 0.071299, "end_time": "2021-09-19T11:55:26.269545", "exception": false, "start_time": "2021-09-19T11:55:26.198246", "status": "completed"} tags=[] cell_id="00002-10111984-2671-46c6-99ba-ff2f0118a95a" deepnote_cell_type="markdown" # # Introduction # There is an imbalance in the education system during the Covid19 pandemic and most of the students don't even have access to educational tools and online learning platforms. There is an urgent need to come up with solutions and by using LearnPlatform dataset we will figure some of the common patterns and identify the clusters based on demography, geography, and accessibility. 
# ## Project Goals # In this project, we will be using data analysis tools to figure out trends in digital learning and how it is effective towards improvised communities. We will be comparing districts and states on factors like demography, internet access, learning product access, and finance. In the end, we will summarize our report and point towards the areas that need our more attention to make education accessible for all students the United States. # # # # + [markdown] papermill={"duration": 0.065923, "end_time": "2021-09-19T11:55:26.397087", "exception": false, "start_time": "2021-09-19T11:55:26.331164", "status": "completed"} tags=[] cell_id="00003-97c5d8d4-9183-4df8-aff4-2c3b8293bd54" deepnote_cell_type="markdown" # # Import relevant libraries # I will be mostly using Altair, Seaborn, and Plotly for data visualization. For data analysis, I will be using Numpy and Pandas. # + _kg_hide-input=true papermill={"duration": 2.305849, "end_time": "2021-09-19T11:55:28.764781", "exception": false, "start_time": "2021-09-19T11:55:26.458932", "status": "completed"} tags=[] cell_id="00004-3002c021-f79c-42e2-be63-0358c793431f" deepnote_to_be_reexecuted=false source_hash="ca71b0b" execution_start=1634282955351 execution_millis=1069 deepnote_cell_type="code" import numpy as np import pandas as pd import math import glob import os import altair as alt import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go # + [markdown] papermill={"duration": 0.058606, "end_time": "2021-09-19T11:55:28.883129", "exception": false, "start_time": "2021-09-19T11:55:28.824523", "status": "completed"} tags=[] cell_id="00005-d9f0b6ee-2464-4a9e-8ea7-700d76d57a31" deepnote_cell_type="markdown" # ![logo-for-Learning-Platform.jpg](./Images/image2.jpg) # # **Image by Author** # + [markdown] papermill={"duration": 0.060475, "end_time": "2021-09-19T11:55:29.003071", "exception": false, "start_time": "2021-09-19T11:55:28.942596", "status": 
"completed"} tags=[] cell_id="00006-d79b6997-fb33-4848-9e52-48c7f9be8397" deepnote_cell_type="markdown" # # Creating custome Altair theme # You can create your own theme in Altair which provides uniformity to your plots. # + _kg_hide-input=true papermill={"duration": 0.069803, "end_time": "2021-09-19T11:55:29.131546", "exception": false, "start_time": "2021-09-19T11:55:29.061743", "status": "completed"} tags=[] cell_id="00007-ab45ace7-15f2-4a20-a806-675eaae6d87f" deepnote_to_be_reexecuted=false source_hash="1b4d24bb" execution_start=1634282956426 execution_millis=9 deepnote_cell_type="code" def LearnPlatorm_theme(*args, **kwargs): return { "width": 500, "height": 300, "config": { "style": { "bar": { "size": 15 }, "guide-title": { "fontSize": 15 }, "guide-label": { "fontSize": 15 }, }, "scale": { "bandPaddingInner": 0.5, "bandPaddingOuter": 0.5 }, "legend": { "symbolSize": 15, "titleFontSize": 15, "labelFontSize": 9 }, "axis": { "titleFontSize": 15, "labelFontSize": 18 }, "title": {"fontSize": 20}, } } alt.themes.register('LearnPlatorm_theme', LearnPlatorm_theme) alt.themes.enable('LearnPlatorm_theme'); # + [markdown] papermill={"duration": 0.058037, "end_time": "2021-09-19T11:55:29.247115", "exception": false, "start_time": "2021-09-19T11:55:29.189078", "status": "completed"} tags=[] cell_id="00008-e903bdcd-ff9b-456c-9af9-6e8314a8f704" deepnote_cell_type="markdown" # # Data # **The Dataset Is available at [Kaggle](https://www.kaggle.com/c/learnplatform-covid19-impact-on-digital-learning) under non commercial license.** # # Use this command to download the dataset faster # # # ```python # # # !pip install kaggle # # # !kaggle competitions download -c learnplatform-covid19-impact-on-digital-learning # ``` # # # >Dont forget to add Kaggle API key 👇 # # ![env](./Images/image3.jpeg) # # # **We have three types of Dataset.** # 1. Products data contains Sevice names, Companies, and educational sectors. # 2. 
District data contains Demography, Locations, and Educational Spendings. # 3. Engagement data contains student's engagement with different products per day. # # ## Product # The product file `products_info.csv` includes information about the characteristics of the top 372 products with most users in 2020. The categories listed in this file are part of LearnPlatform's product taxonomy. # # | **Name** | **Description** | # |----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| # | LP ID | The unique identifier of the product | # | URL | Web Link to the specific product | # | Product Name | Name of the specific product | # | Provider/Company Name | Name of the product provider | # | Sector(s) | Sector of education where the product is used | # | Primary Essential Function | The basic function of the product. There are two layers of labels here. Products are first labeled as one of these three categories: LC = Learning & Curriculum, CM = Classroom Management, and SDO = School & District Operations. Each of these categories have multiple sub-categories with which the products were labeled | # | | # + [markdown] papermill={"duration": 0.05915, "end_time": "2021-09-19T11:55:29.364750", "exception": false, "start_time": "2021-09-19T11:55:29.305600", "status": "completed"} tags=[] cell_id="00009-6ce7620b-d847-425f-a425-591315ec508f" deepnote_cell_type="markdown" # ### Loading Data Educational Product Data # As we can see we have product names, Companies, and Sector data with Primary Functions. 
# + _kg_hide-input=true papermill={"duration": 0.09879, "end_time": "2021-09-19T11:55:29.522986", "exception": false, "start_time": "2021-09-19T11:55:29.424196", "status": "completed"} tags=[] cell_id="00010-d1beb914-d43a-4078-a0c8-8420006eaa1d" deepnote_to_be_reexecuted=false source_hash="40c01031" execution_start=1634282956435 execution_millis=38 deepnote_cell_type="code" products_data = pd.read_csv( "./learnplatform-covid19-impact-on-digital-learning/products_info.csv" ) products_data.head() # + [markdown] papermill={"duration": 0.059736, "end_time": "2021-09-19T11:55:29.642267", "exception": false, "start_time": "2021-09-19T11:55:29.582531", "status": "completed"} tags=[] cell_id="00011-679b93e1-d658-4811-a1a4-54e915235ae9" deepnote_cell_type="markdown" # ## District # The district file ```districts_info.csv``` includes information about the **characteristics of school districts**, including data from # - NCES (2018-19), # - FCC (Dec 2018), and # - Edunomics Lab. # # | Name | Description | # |------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| # | district_id | The unique identifier of the school district | # | state | The state where the district resides in | # | locale | NCES locale classification that categorizes U.S. territory into four types of areas: City, Suburban, Town, and Rural. See Locale Boundaries User's Manual for more information. 
| # | pct_black/hispanic | Percentage of students in the districts identified as Black or Hispanic based on 2018-19 NCES data | # | pct_free/reduced | Percentage of students in the districts eligible for free or reduced-price lunch based on 2018-19 NCES data | # | countyconnectionsratio | ratio (residential fixed high-speed connections over 200 kbps in at least one direction/households) based on the county level data from FCC From 477 (December 2018 version). See FCC data for more information. | # | pptotalraw | Per-pupil total expenditure (sum of local and federal expenditure) from Edunomics Lab's National Education Resource Database on Schools (NERD$) project. The expenditure data are school-by-school, and we use the median value to represent the expenditure of a given school district. | # + [markdown] papermill={"duration": 0.058609, "end_time": "2021-09-19T11:55:29.760731", "exception": false, "start_time": "2021-09-19T11:55:29.702122", "status": "completed"} tags=[] cell_id="00012-fb59b094-99a0-4d68-9555-df03555571e0" deepnote_cell_type="markdown" # ### Loading Data District Data # We have different states, locale, educational spending and more importanly we have Demographical data. 
# + _kg_hide-input=true papermill={"duration": 0.081509, "end_time": "2021-09-19T11:55:29.902430", "exception": false, "start_time": "2021-09-19T11:55:29.820921", "status": "completed"} tags=[] cell_id="00013-be027d96-4d21-45c6-98d6-5a9d8af8dfe2" deepnote_to_be_reexecuted=false source_hash="296802af" execution_start=1634282956500 execution_millis=24 deepnote_cell_type="code" districts_data = pd.read_csv( "./learnplatform-covid19-impact-on-digital-learning/districts_info.csv" ) districts_data.head() # + [markdown] papermill={"duration": 0.059773, "end_time": "2021-09-19T11:55:30.021583", "exception": false, "start_time": "2021-09-19T11:55:29.961810", "status": "completed"} tags=[] cell_id="00014-a1596fc4-dac3-477d-9263-43a684cd80c7" deepnote_cell_type="markdown" # ### Engagement data # The engagement data are aggregated at school district level, and each file in the folder `engagement_data` represents data from **one school district**. # # # | Name | Description | # |------------------|----------------------------------------------------------------------------------------------------------------| # | time | date in "YYYY-MM-DD" | # | lp_id | The unique identifier of the product | # | pct_access | Percentage of students in the district have at least one page-load event of a given product and on a given day | # | engagement_index | Total page-load events per one thousand students of a given product and on a given day | # + [markdown] papermill={"duration": 0.060267, "end_time": "2021-09-19T11:55:30.142424", "exception": false, "start_time": "2021-09-19T11:55:30.082157", "status": "completed"} tags=[] cell_id="00015-952031fd-19b3-44fd-be06-5e8c2dbe023e" deepnote_cell_type="markdown" # #### Engagement Data Extraction Function # Our engagement data is divided into multiple `.csv` files based on _District id_ so we need to create a Python function to extract data from the individual files and concat them into one. 
# # ![image.png](attachment:547bb482-cbbc-4902-9785-81b9537d00c2.png)

# + _kg_hide-input=true papermill={"duration": 0.07064, "end_time": "2021-09-19T11:55:30.272837", "exception": false, "start_time": "2021-09-19T11:55:30.202197", "status": "completed"} tags=[] cell_id="00016-669ff170-a2f4-4664-84a0-3b761e8ae411" deepnote_to_be_reexecuted=false source_hash="3ac8b0e" execution_start=1634282956513 execution_millis=7 deepnote_cell_type="code"
def engagement_data_extract(file_path):
    """Concatenate every per-district engagement CSV into one DataFrame.

    Each file under the folder is named ``<district_id>.csv``; the district
    id is recovered from the file name and attached as a ``district_id``
    string column so rows stay traceable after concatenation.

    Parameters
    ----------
    file_path : str
        Path to the engagement-data folder *with a trailing separator*
        (e.g. ``"./engagement_data/"``): ``os.path.dirname`` strips the last
        path component, so the trailing slash makes the directory itself the
        walk root.

    Returns
    -------
    pandas.DataFrame
        All district engagement rows with a fresh 0..n-1 integer index.

    Raises
    ------
    ValueError
        If no ``.csv`` file is found (``pd.concat`` on an empty list).
    """
    dir_path = os.path.dirname(file_path)
    frames = []
    for root, dirs, files in os.walk(dir_path):
        # Sort for a deterministic row order -- os.walk's file order is
        # filesystem-dependent.
        for fname in sorted(files):
            if fname.endswith('.csv'):
                # os.path.join is portable, unlike manual '/' concatenation.
                df = pd.read_csv(os.path.join(root, fname),
                                 index_col=None, header=0)
                # The file name (minus extension) is the district id.
                df["district_id"] = fname.split(".")[0]
                frames.append(df)
    # Stack every district and discard the per-file indices.
    return pd.concat(frames).reset_index(drop=True)

# + [markdown] papermill={"duration": 0.059936, "end_time": "2021-09-19T11:55:30.393622", "exception": false, "start_time": "2021-09-19T11:55:30.333686", "status": "completed"} tags=[] cell_id="00017-68d4664e-9aad-4c94-a346-f02fd31f4112" deepnote_cell_type="markdown"
# ### Loading Data Engagement Data
# It took us 19 seconds to load all files from a folder and now we have a DataFrame containing all district Engagement Data.
# + _kg_hide-input=true papermill={"duration": 20.850712, "end_time": "2021-09-19T11:55:51.304373", "exception": false, "start_time": "2021-09-19T11:55:30.453661", "status": "completed"} tags=[] cell_id="00018-0557c55e-2501-4889-9e64-7cc85e0bbf96" deepnote_to_be_reexecuted=false source_hash="1b9a8d99" execution_start=1634282956520 execution_millis=14670 deepnote_cell_type="code" # %%time engagement_data = engagement_data_extract( "./learnplatform-covid19-impact-on-digital-learning/engagement_data/" ) engagement_data.head() # + [markdown] papermill={"duration": 0.060584, "end_time": "2021-09-19T11:55:51.426581", "exception": false, "start_time": "2021-09-19T11:55:51.365997", "status": "completed"} tags=[] cell_id="00019-5d06bbf3-1493-428e-98f5-c0a71bed9970" deepnote_cell_type="markdown" # # Missing Data # Let's look at all three DataFrames for missing values per Column # + [markdown] papermill={"duration": 0.061356, "end_time": "2021-09-19T11:55:51.549188", "exception": false, "start_time": "2021-09-19T11:55:51.487832", "status": "completed"} tags=[] cell_id="00020-9c05e944-cb90-4f7c-9502-cff1dd491c4c" deepnote_cell_type="markdown" # ## Plot Missing function # The function helps us plot an Altair bar chart for all data frames containing missing values. 
# + _kg_hide-input=true papermill={"duration": 0.068501, "end_time": "2021-09-19T11:55:51.677891", "exception": false, "start_time": "2021-09-19T11:55:51.609390", "status": "completed"} tags=[] cell_id="00021-99a75e20-e7ed-44ca-8854-57c6e90136dc" deepnote_to_be_reexecuted=false source_hash="ad85243b" execution_start=1634282971231 execution_millis=0 deepnote_cell_type="code" def plot_missing(df): data = df.isnull().sum().to_frame().reset_index()\ .rename(columns={'index':'Columns',0:'Counts'}) return alt.Chart(data).mark_bar().encode( x='Columns', y='Counts', tooltip='Counts' ) # + [markdown] papermill={"duration": 0.061229, "end_time": "2021-09-19T11:55:51.799759", "exception": false, "start_time": "2021-09-19T11:55:51.738530", "status": "completed"} tags=[] cell_id="00022-9a3d6b09-8894-43be-8652-3ff5fe3c95cf" deepnote_cell_type="markdown" # As we can observe that our district dataset mostly contains missing values in `pp_total_raw,pct_free/reduced,county_connections_ratio`. Let's see all the missing values together in the next section. 
# + _kg_hide-input=true papermill={"duration": 0.123676, "end_time": "2021-09-19T11:55:51.985383", "exception": false, "start_time": "2021-09-19T11:55:51.861707", "status": "completed"} tags=[] cell_id="00023-63ba0e5a-5b81-4553-9bfa-ee6d09c7e562" deepnote_to_be_reexecuted=false source_hash="92fa6e50" execution_start=1634282971231 execution_millis=50 deepnote_cell_type="code" NA = pd.DataFrame(data=[districts_data.isna().sum().tolist(), ["{:.2f}".format(i)+'%' \ for i in (districts_data.isna().sum()/districts_data.shape[0]*100).tolist()]], columns=districts_data.columns, index=['NA Count', 'NA Percent']).transpose().\ sort_values(by='NA Count',ascending=False) NA.style.background_gradient(cmap="vlag", subset=['NA Count']) # + [markdown] papermill={"duration": 0.060626, "end_time": "2021-09-19T11:55:52.108642", "exception": false, "start_time": "2021-09-19T11:55:52.048016", "status": "completed"} tags=[] cell_id="00024-21092662-d70e-46c2-b3ac-aed953ea629b" deepnote_cell_type="markdown" # The subplot below shows missing values in all three datasets we will be removing them in the next section as it is hard to replace them without actual information. 
# + _kg_hide-input=true papermill={"duration": 4.45148, "end_time": "2021-09-19T11:55:56.621509", "exception": false, "start_time": "2021-09-19T11:55:52.170029", "status": "completed"} tags=[] cell_id="00025-957e89c2-55a5-4e32-8c7e-7b113b22098b" deepnote_to_be_reexecuted=false source_hash="4b500a76" execution_start=1634282971986 execution_millis=1002 deepnote_cell_type="code" plot_eng = plot_missing(engagement_data).\ properties(title='Engagement', width=200, height=200) plot_pro = plot_missing(products_data).\ properties(title='Products', width=200, height=200) plot_dis = plot_missing(districts_data).\ properties(title='Districts', width=200, height=200) alt.hconcat(plot_pro, plot_dis, plot_eng ) # + [markdown] papermill={"duration": 0.062767, "end_time": "2021-09-19T11:55:56.747104", "exception": false, "start_time": "2021-09-19T11:55:56.684337", "status": "completed"} tags=[] cell_id="00026-0c24ccc7-29dd-43fe-a9bf-e7ed179c8920" deepnote_cell_type="markdown" # # Cleaning Data # We will be cleaning district data as it contains an array of values. We will be taking advantage of both limits. # 1. Converting list of columns`pct_black/hispanic` and `pct_free/reduced` into float # 2. Converting list `pp_total_raw` into Integers. # 3. Droping `county_connections_ratio` due to static 0.18 to 1 values acrross all categories. 
# +
# Clean the districts table: drop incomplete rows, then replace each bucketed
# range string "[a, b[" with a representative midpoint value.
# Fix: the original also ran the float conversion on county_connections_ratio
# and then immediately dropped that column — wasted (and fragile) work, so the
# column is now excluded from the loop and dropped directly.
districts_data.dropna(inplace=True)
# "[a, b[" -> lower bound a, shifted by half the bucket width (0.2 -> +0.1).
for i in ['pct_black/hispanic', 'pct_free/reduced']:
    districts_data[i] = districts_data[i].apply(
        lambda x: float(str(x).split(',')[0][1:]) + 0.1
    )
# Same idea for expenditure buckets, which are 2000 wide (-> +1000).
districts_data['pp_total_raw'] = districts_data['pp_total_raw'].apply(
    lambda x: int(x.split(',')[0][1:]) + 1000
)
# Static "0.18 to 1" across all rows, so it carries no signal.
districts_data.drop('county_connections_ratio', axis=1, inplace=True)
districts_data.head()

# + [markdown]
# # Feature Distributions Visualization
# In this section, we will discover different feature distributions.

# + [markdown]
# ## Districts per states
# As we can see, the dataset contains more districts from Utah and Illinois
# than any other state.
# + _kg_hide-input=true papermill={"duration": 0.097585, "end_time": "2021-09-19T11:55:57.310803", "exception": false, "start_time": "2021-09-19T11:55:57.213218", "status": "completed"} tags=[] cell_id="00030-5fc89ac1-1477-4eb4-99a7-37ac9a4938ea" deepnote_to_be_reexecuted=false source_hash="2f30e3d9" execution_start=1634282973041 execution_millis=16 deepnote_cell_type="code" alt.Chart(districts_data.dropna()).mark_bar( cornerRadiusTopLeft=3, cornerRadiusTopRight=3).encode( x=alt.X('state:O',sort=alt.EncodingSortField(field="state", op="count", order='descending')), y=alt.Y('count(state):O'), tooltip='count(state):O', color=alt.Color('state:O', sort=alt.EncodingSortField(field="state",op="count", order='descending'), legend=None,scale=alt.Scale(scheme = 'blues')) ).properties(title='Number of Districts per states') # + [markdown] papermill={"duration": 0.06413, "end_time": "2021-09-19T11:55:57.438751", "exception": false, "start_time": "2021-09-19T11:55:57.374621", "status": "completed"} tags=[] cell_id="00031-86db6353-653f-446d-b4fc-38b6037df3fe" deepnote_cell_type="markdown" # ## Locale Distribution # The suburbs are dominating with 59 percentage, which also means people from medium to high class have more access to internet. 
# + _kg_hide-input=true papermill={"duration": 0.230029, "end_time": "2021-09-19T11:55:57.732622", "exception": false, "start_time": "2021-09-19T11:55:57.502593", "status": "completed"} tags=[] cell_id="00032-2d210496-7ce1-45a8-b2e7-84a6bd2bab62" deepnote_to_be_reexecuted=false source_hash="52ba7c3f" execution_start=1634282973053 execution_millis=188 deepnote_cell_type="code" fig, ax = plt.subplots(figsize=(16, 8)) fig.suptitle('Locale Distribution', size = 20, color = "black") explode = ( 0.03, 0.03, 0.3, 0.03) labels = list(districts_data.locale.value_counts().index) sizes = districts_data["locale"].dropna().value_counts() ax.pie(sizes, explode = explode, colors = sns.color_palette("Set2"), startangle = 60, labels = labels, autopct = '%1.0f%%', pctdistance = 0.9 ) ax.add_artist(plt.Circle((0,0),0.4,fc='white')) plt.show() # + [markdown] papermill={"duration": 0.0659, "end_time": "2021-09-19T11:55:57.865061", "exception": false, "start_time": "2021-09-19T11:55:57.799161", "status": "completed"} tags=[] cell_id="00033-f3d680dd-9ed2-4d60-bd2b-e656c3d394b1" deepnote_cell_type="markdown" # ## Educational Product Providers # Google provide most educational products then any one close to it. 
# >In short, online education is dominated by Google LLC.

# +
# Top-10 providers by product count: aggregate, rank, keep rank < 10, plot.
alt.Chart(products_data.dropna()).transform_aggregate(
    count='count()',
    groupby=['Provider/Company Name']
).transform_window(
    rank='rank(count)',
    sort=[alt.SortField('count', order='descending')]
).transform_filter(
    alt.datum.rank < 10
).mark_bar(
    cornerRadiusTopLeft=3,
    cornerRadiusTopRight=3).encode(
    y=alt.Y('Provider/Company Name:N', sort='-x'),
    x='count:Q',
    tooltip='count:Q',
    color=alt.Color('Provider/Company Name:O',
                    sort=alt.EncodingSortField(field="Provider/Company Name", op="count", order='descending'),
                    legend=None, scale=alt.Scale(scheme = 'set2'))
).properties(title='Top Educational Product Providers')

# + [markdown]
# Let's see what products are provided by Google.

# +
# The ten most common Google products in the catalogue.
products_data[products_data['Provider/Company Name']=='Google LLC']['Product Name'].value_counts().head(10).to_frame()

# + [markdown]
# ## Educational Sector Distributions
# Educational sectors are divided into three categories: PreK-12, Higher
# Education, and Corporate.
# >Some products are specific to a sector but others are quite general.

# +
# Raw value counts of the (possibly multi-valued) Sector(s) column.
sector = products_data["Sector(s)"].value_counts().to_frame().reset_index()
sector.style.background_gradient(cmap="Set2", subset=['Sector(s)'])

# + [markdown]
# As we can see, PreK education dominates the distribution with 54 percent.
# +
# Count how many products serve each educational sector. A product may list
# several sectors separated by ';', so split each entry and tally every part.
# Fix: the original kept three parallel counters (c1/c2/c3) updated through a
# chain of ifs; collections.Counter does the same tally idiomatically.
from collections import Counter

sector_counts = Counter()
for s in products_data["Sector(s)"].dropna():
    sector_counts.update(part.strip() for part in s.split(";"))

fig, ax = plt.subplots(figsize=(16, 8))
fig.suptitle('Educational Sector Distribution', size=20, color="black")
explode = (0.03, 0.03, 0.3)
labels = ['PreK-12', 'Higher Ed', 'Corporate']
sizes = [sector_counts[label] for label in labels]
ax.pie(sizes, explode=explode, colors=sns.color_palette("Set2"),
       startangle=60, labels=labels, autopct='%1.0f%%', pctdistance=0.9)
# White inner circle turns the pie into a donut.
ax.add_artist(plt.Circle((0, 0), 0.4, fc='white'))
plt.show()

# + [markdown]
# ## Primary Functions with main and subcategories
# There are three types of main primary functions:
# 1. LC = Learning & Curriculum
# 2. CM = Classroom Management
# 3. SDO = School & District Operations
#
# You can interact with the Plotly sunburst plot to explore the distribution
# of main categories and subcategories.
#
# >The Learning products have the majority share in this group and the most
# common subcategories are digital learning platforms.
# +
# Split "Primary Essential Function" (e.g. "LC - Digital Learning Platforms")
# into a main category and a subcategory, then visualise the hierarchy.
PEF_cat_main = []
PEF_cat_sub = []
for p in products_data["Primary Essential Function"]:
    if pd.isnull(p):
        PEF_cat_main.append(np.nan)
        PEF_cat_sub.append(np.nan)
    else:
        # Split only on the first '-' so subcategories may themselves
        # contain dashes.
        parts = p.split("-", 1)
        PEF_cat_main.append(parts[0].strip())
        PEF_cat_sub.append(parts[1].strip())

products_data["Essential_Function_main"] = PEF_cat_main
products_data["Essential_Function_sub"] = PEF_cat_sub

# Frequency of every (main, sub) pair, as a tidy frame for plotting.
DE = (
    products_data[["Essential_Function_main", "Essential_Function_sub"]]
    .value_counts()
    .rename_axis(["Essential_Function_main", "Essential_Function_sub"])
    .reset_index(name="counts")
)

fig = px.sunburst(
    DE,
    path=["Essential_Function_main", "Essential_Function_sub"],
    values="counts",
    title="Sunburst Primary Essential Functions",
)
fig.show()

# + [markdown]
# ## Essential Function Subcategories Distribution
# We can observe the distribution of all the subcategories.
#
# > Content creation and digital learning are leading in this market.
# + _kg_hide-input=true papermill={"duration": 0.178292, "end_time": "2021-09-19T11:56:00.681546", "exception": false, "start_time": "2021-09-19T11:56:00.503254", "status": "completed"} tags=[] cell_id="00044-6de2320a-9207-4dda-89db-3c88bb4f6704" deepnote_to_be_reexecuted=false source_hash="49ef13c2" execution_start=1634282975363 execution_millis=114 deepnote_cell_type="code" fig = px.histogram( DE, x = "counts", y = "Essential_Function_sub", title="Primary Essential Function Sub-Categories", ) fig.update_traces(marker = dict( color='mediumpurple' )), fig.show() # + [markdown] papermill={"duration": 0.070286, "end_time": "2021-09-19T11:56:00.823357", "exception": false, "start_time": "2021-09-19T11:56:00.753071", "status": "completed"} tags=[] cell_id="00045-6d318ce0-c75e-4f4c-a3ce-700b94621bb3" deepnote_cell_type="markdown" # # Merging Three Datasets # We will be merging all three datasets on `LP ID` and `district id`. As we can see the combined dataset has 18 columns that will later help us with both geographical and time series plots. 
# + _kg_hide-input=true papermill={"duration": 8.680875, "end_time": "2021-09-19T11:56:09.575760", "exception": false, "start_time": "2021-09-19T11:56:00.894885", "status": "completed"} tags=[] cell_id="00046-3f1db9f7-9001-4b50-bc85-ec0d91404dc8" deepnote_to_be_reexecuted=false source_hash="199484d0" execution_start=1634282976329 execution_millis=4733 deepnote_cell_type="code" engagement_data['time'] = pd.to_datetime(engagement_data['time']) engagement_data["district_id"] = engagement_data["district_id"].astype(str).astype(int) # + _kg_hide-input=true papermill={"duration": 10.684909, "end_time": "2021-09-19T11:56:20.332473", "exception": false, "start_time": "2021-09-19T11:56:09.647564", "status": "completed"} tags=[] cell_id="00047-2a4b3b2c-95dc-4f41-a6b4-f0d0a2b72d1c" deepnote_to_be_reexecuted=false source_hash="4620fa08" execution_start=1634282981078 execution_millis=8112 deepnote_cell_type="code" complete_data = products_data.merge( engagement_data, left_on='LP ID', right_on='lp_id' ).merge( districts_data, left_on='district_id', right_on='district_id' ) # merging 3 datasets triggers memory warning # + _kg_hide-input=true papermill={"duration": 0.098791, "end_time": "2021-09-19T11:56:20.502371", "exception": false, "start_time": "2021-09-19T11:56:20.403580", "status": "completed"} tags=[] cell_id="00048-872c04dd-602b-47b6-8476-c5e8eb7b3aa3" deepnote_to_be_reexecuted=false source_hash="4d7e688f" execution_start=1634282989190 execution_millis=17 deepnote_cell_type="code" complete_data.head() # + [markdown] papermill={"duration": 0.071446, "end_time": "2021-09-19T11:56:20.645808", "exception": false, "start_time": "2021-09-19T11:56:20.574362", "status": "completed"} tags=[] cell_id="00049-896534db-be24-49e3-82ce-7ba286762c63" deepnote_cell_type="markdown" # # Top Educational Products # Google products are dominating with Wikipedia and Netflix as an exception. Netflix provides kids with educational seise and tutorials. 
# +
complete_data['Product Name'].value_counts().head(20).to_frame()

# + [markdown]
# # Distribution of Race, Reduced Fee, Expenditures, and Internet connection per state
# We use a small pandas helper to display a table showing the per-state mean
# of a feature related to demography or expenditure.

# +
def mean_state_data(df, col):
    """Return a styled table of the per-state mean of *col*, sorted descending.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain *col* and a 'state' column.
    col : str
        Numeric column to average per state.
    """
    per_state = (
        df[[col, 'state']]
        .groupby(by='state')
        .mean()
        .reset_index()
        .sort_values(by=col, ascending=False)
    )
    return per_state.style.background_gradient(cmap="Set2_r", subset=[col])

# + [markdown]
# ## Black and Hispanic
# It seems like Texas has more Black/Hispanic students than any other state,
# followed by Florida, Michigan, and Minnesota.
# + _kg_hide-input=true papermill={"duration": 1.809542, "end_time": "2021-09-19T11:56:24.095114", "exception": false, "start_time": "2021-09-19T11:56:22.285572", "status": "completed"} tags=[] cell_id="00054-12f06e9e-01db-4d71-9028-d73ef69a86c3" deepnote_to_be_reexecuted=false source_hash="54e2fb36" execution_start=1634282989605 execution_millis=1345 deepnote_cell_type="code" mean_state_data(complete_data,'pct_black/hispanic') # + [markdown] papermill={"duration": 0.071604, "end_time": "2021-09-19T11:56:24.239420", "exception": false, "start_time": "2021-09-19T11:56:24.167816", "status": "completed"} tags=[] cell_id="00055-b03c464f-009a-4e0c-a508-a64ec7f41692" deepnote_cell_type="markdown" # ## Reduced Fee or Free Education # Minnesota provides 70 percent of free or reduced fee education, followed by Michigan and Indiana. # + _kg_hide-input=true papermill={"duration": 0.588293, "end_time": "2021-09-19T11:56:24.900539", "exception": false, "start_time": "2021-09-19T11:56:24.312246", "status": "completed"} tags=[] cell_id="00056-59e5f7e3-4e09-4cba-85e6-bf86a9eda45c" deepnote_to_be_reexecuted=false source_hash="a4ffb9c1" execution_start=1634282990942 execution_millis=325 deepnote_cell_type="code" mean_state_data(complete_data,'pct_free/reduced') # + [markdown] papermill={"duration": 0.073121, "end_time": "2021-09-19T11:56:25.046201", "exception": false, "start_time": "2021-09-19T11:56:24.973080", "status": "completed"} tags=[] cell_id="00057-bb80dd0d-87fa-485e-9971-2a7757bd5a74" deepnote_cell_type="markdown" # ## Per-pupil total expenditure # New York spend more on education than any other State in US. The runner-up States are New Jersey and Minnesota. 
# + papermill={"duration": 0.598765, "end_time": "2021-09-19T11:56:25.718316", "exception": false, "start_time": "2021-09-19T11:56:25.119551", "status": "completed"} tags=[] cell_id="00058-1689bbf9-51c5-4fce-806c-c934e2c24abd" deepnote_to_be_reexecuted=false source_hash="fca1072e" execution_start=1634282991274 execution_millis=359 deepnote_cell_type="code" mean_state_data(complete_data,'pp_total_raw') # + [markdown] papermill={"duration": 0.07344, "end_time": "2021-09-19T11:56:25.865257", "exception": false, "start_time": "2021-09-19T11:56:25.791817", "status": "completed"} tags=[] cell_id="00059-f96ed2a2-23c9-4d91-a29d-aa9ac78eab4d" deepnote_cell_type="markdown" # # Time Series Distribution of Educational Product Access Students # We will be observing time-series distributions of Product Access Based on Locale and State. # + [markdown] papermill={"duration": 0.072098, "end_time": "2021-09-19T11:56:26.010744", "exception": false, "start_time": "2021-09-19T11:56:25.938646", "status": "completed"} tags=[] cell_id="00060-49784343-047d-4b89-98ae-43b5f7947a70" deepnote_cell_type="markdown" # ## Based on Locale # The cities were hit with Covid19 the hardest which affected the students the most as they have to study from home that is why we can see the dip in product access from April to July 2020 as compared to other locales. During Summer vacation students stopped using educational products. In suburbs, students have more access to the tools then other locale. overall, they all follow a common pattern. 
# + _kg_hide-input=true papermill={"duration": 1.118829, "end_time": "2021-09-19T11:56:27.207702", "exception": false, "start_time": "2021-09-19T11:56:26.088873", "status": "completed"} tags=[] cell_id="00061-36cad8a8-cc72-48de-85f3-c6b0668b000a" deepnote_to_be_reexecuted=false source_hash="bfe02531" execution_start=1634282991629 execution_millis=745 deepnote_cell_type="code" locale = complete_data[["locale","pct_access","time"]].\ groupby(by=["time","locale"])\ .mean()\ .dropna()\ .reset_index() fig = px.line(locale, x="time", y="pct_access", facet_col='locale',facet_col_wrap=1,color = 'locale') fig.update_layout( title=("Educational product Access per Locale").title(), ) fig.show() # + [markdown] papermill={"duration": 0.07687, "end_time": "2021-09-19T11:56:27.361724", "exception": false, "start_time": "2021-09-19T11:56:27.284854", "status": "completed"} tags=[] cell_id="00062-eefe6618-21a0-44a9-ae19-a591f9d953e9" deepnote_cell_type="markdown" # ## Based on Top Five States # New York was hit hardest with Covid19 and that didn't affect the online learning of students as you can see graph became more consistent after the lockdown. # >New York, Wisconsin, and Indian have higher students using these educational products overall. 
# + _kg_hide-input=true papermill={"duration": 0.98071, "end_time": "2021-09-19T11:56:28.418451", "exception": false, "start_time": "2021-09-19T11:56:27.437741", "status": "completed"} tags=[] cell_id="00063-0602137e-e680-48db-a4c4-9907ff8b1243" deepnote_to_be_reexecuted=false source_hash="7c50842e" execution_start=1634282992367 execution_millis=687 deepnote_cell_type="code" df = complete_data[["state","pct_access","time"]].groupby(by=["time","state"]).mean() df.dropna(inplace =True) df.reset_index(inplace = True) top_5 = df.groupby(by='state')\ .mean()\ .sort_values(by='pct_access',ascending=False)\ .head(5)\ .reset_index()['state']\ .to_list() top_states = df[df.state.isin(top_5)] fig = px.line(top_states, x="time", y="pct_access", facet_col='state',facet_col_wrap=1,color = 'state') fig.update_layout( title=("Educational product Access on Top 5 States").title(), ) fig.show() # + [markdown] papermill={"duration": 0.07835, "end_time": "2021-09-19T11:56:28.575824", "exception": false, "start_time": "2021-09-19T11:56:28.497474", "status": "completed"} tags=[] cell_id="00064-a39fdc37-b0a6-4884-a947-f8bdd9792893" deepnote_cell_type="markdown" # ## Based on Bottom Five States # This is odd as Michigan and North Carolina have almost zero product access from March till the end of August. The Texas have a similar pattern but they had a peak in June. Overall these bottom states became active after September 2020. # # > It is strange that some states have a very odd pattern, maybe due to a bad educational system or lack of awareness. 
# +
# Same per-state time series as above, but for the five states with the
# LOWEST overall mean access.
bottom_5 = (
    df.groupby(by='state')
    .mean()
    .sort_values(by='pct_access', ascending=False)
    .tail(5)
    .reset_index()['state']
    .to_list()
)
bottom_states = df[df.state.isin(bottom_5)]

fig = px.line(bottom_states, x="time", y="pct_access",
              facet_col='state', facet_col_wrap=1, color='state')
fig.update_layout(
    title=("Educational product Access on Bottom 5 States").title(),
)
fig.show()

# + [markdown]
# # Geographical Analysis
# In this section we will be looking at different states based on product
# access and engagement index.
# >We do not have access to all states' data, so you might see a lot of gray
# space on the map.
# + _kg_hide-input=true papermill={"duration": 0.743826, "end_time": "2021-09-19T11:56:29.947018", "exception": false, "start_time": "2021-09-19T11:56:29.203192", "status": "completed"} tags=[] cell_id="00067-987b7209-85bd-4d74-ac80-2d4cecf0133c" deepnote_to_be_reexecuted=false source_hash="ea840b8f" execution_start=1634282993226 execution_millis=440 deepnote_cell_type="code" geo_pct = complete_data[["state","pct_access"]]\ .groupby(by=["state"])\ .mean()\ .dropna()\ .reset_index() geo = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv') geo_pct = pd.merge(geo_pct,geo,on = 'state', how = 'left')[['state','pct_access','code']] geo_pct.head() # + [markdown] papermill={"duration": 0.08148, "end_time": "2021-09-19T11:56:30.110964", "exception": false, "start_time": "2021-09-19T11:56:30.029484", "status": "completed"} tags=[] cell_id="00068-f431485c-dab8-4cde-bbd8-22ae3762e201" deepnote_cell_type="markdown" # ## Education Products Access per State # New York has a higher product access score than any other state, then comes Orange States Wisconson, Illinois, and Indiana. There might be a link between the lower population of Black/Hispanics in these states. Let's explore that in the next part. 
# + _kg_hide-input=true papermill={"duration": 0.113533, "end_time": "2021-09-19T11:56:30.306416", "exception": false, "start_time": "2021-09-19T11:56:30.192883", "status": "completed"} tags=[] cell_id="00069-5a11afe7-c77d-4833-9f4e-f6d931190341" deepnote_to_be_reexecuted=false source_hash="35f3ab0b" execution_start=1634282993667 execution_millis=63 deepnote_cell_type="code" fig = go.Figure(data=go.Choropleth( locations=geo_pct['code'], # Spatial coordinates z = geo_pct['pct_access'].astype(float), # Data to be color-coded locationmode = 'USA-states', # set of locations match entries in `locations` colorscale = 'plasma', text = geo_pct['state'], colorbar_title = "product access", )) fig.update_layout( title_text = 'Education Products access per state', geo_scope='usa', # limite map scope to USA ) fig.show() # + [markdown] papermill={"duration": 0.082986, "end_time": "2021-09-19T11:56:30.473487", "exception": false, "start_time": "2021-09-19T11:56:30.390501", "status": "completed"} tags=[] cell_id="00070-71f46daa-8dd4-4cc1-9f05-f4b70254b2d2" deepnote_cell_type="markdown" # There is a relation between states with lower the population of Black/Hispanic but it is not the stronger. I can see Newyork and Wisconsin with a lower Black/Hispanic population and higher access rate but when it comes to other states with a lower percentage of people of color it's all over the place. The system is fairly balanced towards other races. 
# +
# Per-state mean access vs. share of Black/Hispanic students, sorted so the
# relation between the two columns is easy to scan.
# Fix: removed the unused (and misspelled) Top_state list the original cell
# defined but never referenced.
Race_state = (
    complete_data[['state', 'pct_access', 'pct_black/hispanic']]
    .groupby('state')
    .mean()
    .dropna()
    .reset_index()
    .sort_values(by=['pct_black/hispanic', 'pct_access'])
)
Race_state

# + [markdown]
# ## Engagement Index Per State
# The engagement distribution per state is quite similar to product access.
# It's logical that if students have access to these products there is a
# chance of increased usage of these platforms.
# + _kg_hide-input=true papermill={"duration": 0.607767, "end_time": "2021-09-19T11:56:32.005495", "exception": false, "start_time": "2021-09-19T11:56:31.397728", "status": "completed"} tags=[] cell_id="00073-2bb47b4f-9a0d-4c95-81ce-49fc86137175" deepnote_to_be_reexecuted=false source_hash="40c2dd45" execution_start=1634282994108 execution_millis=362 deepnote_cell_type="code" geo_engag = complete_data[["state","engagement_index"]]\ .groupby(by=["state"])\ .mean()\ .dropna()\ .reset_index() geo_engag = pd.merge(geo_engag,geo,on = 'state', how = 'left')[['state','engagement_index','code']] geo_engag.head() # + papermill={"duration": 0.105241, "end_time": "2021-09-19T11:56:32.195836", "exception": false, "start_time": "2021-09-19T11:56:32.090595", "status": "completed"} tags=[] cell_id="00074-55117c76-7c17-4ee5-a2d3-16119a1c5086" deepnote_to_be_reexecuted=false source_hash="5aee9659" execution_start=1634282994463 execution_millis=15 deepnote_cell_type="code" fig = go.Figure(data=go.Choropleth( locations=geo_engag['code'], # Spatial coordinates z = geo_engag['engagement_index'].astype(float), # Data to be color-coded locationmode = 'USA-states', # set of locations match entries in `locations` colorscale = 'plasma', text = geo_engag['state'], colorbar_title = "Engagement Index", )) fig.update_layout( title_text = 'Education Products Engagement Index per state', geo_scope='usa', # limite map scope to USA ) fig.show() # + [markdown] papermill={"duration": 0.085166, "end_time": "2021-09-19T11:56:32.367289", "exception": false, "start_time": "2021-09-19T11:56:32.282123", "status": "completed"} tags=[] cell_id="00075-8c0e129e-28c2-44e0-87a4-5c6ef29c5e15" deepnote_cell_type="markdown" # # Focusing on People from Improvised Neighborhood # In this section, we will be focusing on the Black/Hispanic community and free or reduced fees. 
# + [markdown] papermill={"duration": 0.084537, "end_time": "2021-09-19T11:56:32.537567", "exception": false, "start_time": "2021-09-19T11:56:32.453030", "status": "completed"} tags=[] cell_id="00076-c657909a-6a12-4fad-a91e-8b3ea704012f" deepnote_cell_type="markdown" # ## Correlation # There is a high correlation between product access and engagement index, which means if the student has access to these products they might use them on daily basis. We can also see another high correlation between Black/Hispanic and Free/ Reduced education. This means the government is doing its job in helping improvised communities to get a proper education. # + _kg_hide-input=true papermill={"duration": 1.515691, "end_time": "2021-09-19T11:56:34.137605", "exception": false, "start_time": "2021-09-19T11:56:32.621914", "status": "completed"} tags=[] cell_id="00077-69b6b462-37c2-420c-abd9-667de9a33e23" deepnote_to_be_reexecuted=false source_hash="3607ded9" execution_start=1634282994488 execution_millis=1612 deepnote_cell_type="code" sns.heatmap(complete_data.drop(['LP ID','lp_id','district_id'],axis=1).corr(), cmap="YlGnBu", annot=True); # + [markdown] papermill={"duration": 0.085099, "end_time": "2021-09-19T11:56:34.315550", "exception": false, "start_time": "2021-09-19T11:56:34.230451", "status": "completed"} tags=[] cell_id="00078-675d3426-a05d-48cf-a9eb-3b365b6c451a" deepnote_cell_type="markdown" # ## Black/Hispanic products access over the year. # The graph below shows that over time the communities with a lesser population of Blacks and Hispanics have a similar pattern to some of the Top states we have discussed earlier. Let's compare this graph with the mixed communities. 
# + _kg_hide-input=true papermill={"duration": 0.685555, "end_time": "2021-09-19T11:56:35.088569", "exception": false, "start_time": "2021-09-19T11:56:34.403014", "status": "completed"} tags=[] cell_id="00079-f6a4ca50-d41b-48bd-98d4-1cf2f85fa6d9" deepnote_to_be_reexecuted=false source_hash="5c665fa3" execution_start=1634282996071 execution_millis=556 deepnote_cell_type="code" Imp_neigh= complete_data[complete_data['pct_black/hispanic']<0.2][['time','pct_access','pct_black/hispanic']].groupby('time').mean().reset_index() fig = px.line(Imp_neigh, x="time", y="pct_access") fig.update_layout( title=("Black/Hispanic community access to Online Education").title(), ) fig.show() # + [markdown] papermill={"duration": 0.086015, "end_time": "2021-09-19T11:56:35.262872", "exception": false, "start_time": "2021-09-19T11:56:35.176857", "status": "completed"} tags=[] cell_id="00080-ce916934-b6b0-4eeb-8658-65ca64047ba7" deepnote_cell_type="markdown" # We can see some changes as the peak has fallen from 1.4 to 1.2. It is a disparity among the different racial groups but its marginal compares to other countries. We can say Black/Hispanic communities have lesser access to educational products. 
# + _kg_hide-input=true papermill={"duration": 0.48215, "end_time": "2021-09-19T11:56:35.830947", "exception": false, "start_time": "2021-09-19T11:56:35.348797", "status": "completed"} tags=[] cell_id="00081-d8971106-7283-403b-8e04-0ef8e1f5b305" deepnote_to_be_reexecuted=false source_hash="3c54440b" execution_start=1634282996621 execution_millis=348 deepnote_cell_type="code" Imp_neigh= complete_data[complete_data['pct_black/hispanic']>=0.2][['time','pct_access','pct_black/hispanic']].groupby('time').mean().reset_index() fig = px.line(Imp_neigh, x="time", y="pct_access") fig.update_layout( title=("Black/Hispanic community access to Online Education").title(), ) fig.show() # + [markdown] _kg_hide-input=true papermill={"duration": 0.087904, "end_time": "2021-09-19T11:56:36.008710", "exception": false, "start_time": "2021-09-19T11:56:35.920806", "status": "completed"} tags=[] cell_id="00082-65fee160-61b2-45b3-95e6-3100142d1677" deepnote_cell_type="markdown" # # Conclusion # The data is limited to fewer states and I didn't found any concrete evidence that Black or Hispanic communities get unfair treatment. Online education is fairly balanced towards all. I did see how some of the states perform worst in terms of product access rating, this might be due to bad policymaking and lack of awareness. We can see that Google is dominating the online education industry by providing a complete ecosystem. The majority of Pre-K students are using these platforms for digital learning. Suburbs have the highest students accessing these products and we can see a clear correlation between product access and engagement index. There is also a high correlation between Black/Hispanic and Free education/ Reduced Fee, which means the government is doing its best to help the poor community by providing free education. 
# + [markdown] tags=[] created_in_deepnote_cell=true deepnote_cell_type="markdown" # <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=10b483d4-cd30-4b17-8915-b61c2790ec6c' target="_blank"> # <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTEyMzUuMDAwMDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIyMjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTk
wNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQxNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1IDkuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgICAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img> # Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
evolution-of-digital-learning-during-covid19.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Goal: scrape the website <a href="https://www.skinnytaste.com">SkinnyTaste.com</a> to retrieve the Blue Weight Watchers points values for all dinner recipes. Dump the result into a dataframe and sort by point values low to high.

import csv

import requests
import pandas as pd
from bs4 import BeautifulSoup
from time import sleep


def getSoup(url):
    """Fetch *url* and return the parsed document as a BeautifulSoup tree."""
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html5lib')
    return soup


# grab recipe urls for all 19 pages of dinner-recipes from skinnytaste.com
all_recipe_urls = []
for page in range(1, 20):
    print('page %d...' % (page), end='')
    url = 'https://www.skinnytaste.com/recipes/dinner-recipes/page/%d/' % (page)
    soup = getSoup(url)
    recipes = soup.find_all('a', {'rel': 'bookmark'})
    for r in recipes:
        all_recipe_urls.append(r['href'])
    sleep(0.25)  # small delay between page fetches to be polite to the server
print('done. %d recipes found.' % (len(all_recipe_urls)))

# Navigate to each dinner-recipe url and pull the name and blue ww points.
# The csv module handles quoting of titles containing commas (the original
# hand-rolled quoting broke on titles containing a double quote), and the
# context manager guarantees the file is closed even if the loop raises.
with open('skinny_db.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['url', 'title', 'bluepoints'])
    for count, url in enumerate(all_recipe_urls, start=1):
        print('%d..' % (count), end='')
        try:
            soup = getSoup(url)
            # .find() returns None when a recipe page lacks the element,
            # which raises AttributeError on .text -- those pages (and any
            # network failure) are reported and skipped.
            bluepoints = soup.find('span', {'title': 'blue'}).text
            title = soup.find('div', {'class': 'post-title'}).h1.text
            writer.writerow([url, title, bluepoints])
        except (AttributeError, requests.RequestException):
            print('Error: %s' % (url))
print('done')

df = pd.read_csv('skinny_db.csv', encoding='ISO-8859-1')
df.sort_values(['bluepoints']).head(40)
SkinnyTasteScraping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:root] * # language: python # name: conda-root-py # --- # # TEAM V - Final Project for Data Visualization (IS590DV Fall 2019) # by <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # ## [WISDM Smartphone and Smartwatch Activity and Biometrics Dataset Data Set](https://archive.ics.uci.edu/ml/datasets/WISDM+Smartphone+and+Smartwatch+Activity+and+Biometrics+Dataset+) # # From # > Smartphone and Smartwatch-Based Biometrics Using Activities of Daily Living. IEEE Access, 7:133190-133202, Sept. 2019. # # and # # > [<NAME>, <NAME> and <NAME> (2010). Activity Recognition using Cell Phone Accelerometers, Proceedings of the Fourth International Workshop on Knowledge Discovery from Sensor Data (at KDD-10), Washington DC.](http://www.cis.fordham.edu/wisdm/includes/files/sensorKDD-2010.pdf) # # <style>table {margin:0;} </style> # # | Data File Group | Total Size | Total Files | Instances | # |-:|-:|-:|-:| # |`Phone/Accel`| 250MB | 51 | 4,804,404 | # |`Phone/Gyro` | 205MB | 51 | 3,608,635 | # |`Watch/Accel`| 196MB | 51 | 3,777,048 | # |`Watch/Gyro` | 190MB | 51 | 3,440,344 | # |All | 1.1GB | 204 | 15,630,426| # + # %matplotlib widget from pathlib import Path from IPython.display import display import ipywidgets import ipywidgets as widgets from ipywidgets import interact, interactive import traitlets import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import bqplot # Thanks to `ImportanceOfBeingErnest` from https://stackoverflow.com/questions/47404653/pandas-0-21-0-timestamp-compatibility-issue-with-matplotlib pd.plotting.register_matplotlib_converters() # - subject_ids = [p.stem.split('_')[0] for p in Path('../data/processed/wisdm/merged_sensors/phone/').glob('*.csv')] activity_name_key = {k: v for k, v in [line.strip().split(' 
= ') for line in Path('../references/wisdm_activity_key.txt').read_text().strip().split('\n')]} activity_letter_key = {v: k for k, v in [line.strip().split(' = ') for line in Path('../references/wisdm_activity_key.txt').read_text().strip().split('\n')]} def get_by_subject_id(subject_id='1600'): subject_id = str(subject_id) # force into a string device_dict = { 'phone': pd.read_csv('../data/processed/wisdm/merged_sensors/phone/{}_phone.csv'.format(subject_id)).drop(columns=['timestamp']),#, parse_dates=['timestamp']) The time isn't really needed and adds a lot of loading time 'watch': pd.read_csv('../data/processed/wisdm/merged_sensors/watch/{}_watch.csv'.format(subject_id)).drop(columns=['timestamp'])#, parse_dates=['timestamp']) } for k,v in device_dict.items(): v.insert(2, 'activity_name', v['activity_code'].replace(activity_letter_key)) return device_dict all_phone_df = pd.read_csv('../data/processed/wisdm/merged_sensors/phone_subject_activity_counts.csv') all_watch_df = pd.read_csv('../data/processed/wisdm/merged_sensors/watch_subject_activity_counts.csv') # ## Part I: General Data Analytics # # Below are charts briefly analyzing the raw data through several basic facets. plt.ion() fig, ax = plt.subplots() fig.canvas.layout.width='800px' fig.canvas.layout.height='600px' ax.bar(['phone', 'watch'], [all_phone_df.num_rows.sum(), all_watch_df.num_rows.sum()], align='center', alpha=0.5, color='blue') ax.set_ylabel('Number of records') fig.suptitle('Total Records per Device') fig.show() # Comparing total records logged by the phone and the watch, the watch is logging approximately 14% more records than the phone. 
# + plt.ioff() rps_phone_count = all_phone_df.groupby('subject_id')['num_rows'].sum() rps_watch_count = all_watch_df.groupby('subject_id')['num_rows'].sum() fig, axs = plt.subplots(2,1) axs[0].set_title('Records for Phone by Subject') axs[1].set_title('Records for Watch by Subject') axs[0].grid(axis='x', alpha=0.1, zorder=-1) axs[1].grid(axis='x', alpha=0.1, zorder=-1) xlabels = [str(i) for i in rps_phone_count.index] axs[0].bar(xlabels, rps_phone_count.values, alpha=0.8, color='blue', zorder=5) axs[0].xaxis.set_tick_params(labelrotation=40) axs[0].set_xticklabels(xlabels, va='top', ha='right', rotation_mode='anchor') axs[0].set_xlabel('Subject ID', fontsize=12) axs[0].set_ylabel('Number of Records', fontsize=12) axs[1].bar(xlabels, rps_watch_count.values, alpha=0.8, color='orange', zorder=5) axs[1].xaxis.set_tick_params(labelrotation=40) axs[1].set_xticklabels(xlabels, va='top', ha='right', rotation_mode='anchor') axs[1].set_xlabel('Subject ID', fontsize=12) axs[1].set_ylabel('Number of Records', fontsize=12) fig.canvas.layout.height='1200px' fig.subplots_adjust(hspace=0.4) widgets.VBox([fig.canvas]) # - # We can observe that the variance between the number of records per subject is less in the watch than the variance of the same recorded in the phone. Subject 1629 has the highest number of activities recorded in both phone and watch, but it most probably looks like an outlier, but we aren’t quite sure why it is one. 
# + plt.ioff() # rpa_phone_count = all_phone_df.copy().replace({'activity_code': activity_letter_key})['activity_code'].value_counts() # rpa_watch_count = all_watch_df.copy().replace({'activity_code': activity_letter_key})['activity_code'].value_counts() rpa_phone_count = all_phone_df.replace({'activity_code': activity_letter_key}).groupby('activity_code')['num_rows'].sum().sort_values(ascending=False) rpa_watch_count = all_watch_df.replace({'activity_code': activity_letter_key}).groupby('activity_code')['num_rows'].sum().sort_values(ascending=False) fig, axs = plt.subplots(2,1) axs[0].set_title('Records for Phone by Activity') axs[1].set_title('Records for Watch by Activity') axs[0].grid(axis='y', alpha=0.2, zorder=-1) axs[1].grid(axis='y', alpha=0.2, zorder=-1) xlabels = [str(i) for i in rpa_phone_count.index] axs[0].bar(xlabels, rpa_phone_count.values, alpha=0.8, color='blue', zorder=5) axs[0].xaxis.set_tick_params(labelrotation=40) axs[0].set_xticklabels(xlabels, va='top', ha='right', rotation_mode='anchor') axs[0].set_xlabel('Activity', fontsize=12) axs[0].set_ylabel('Number of Records', fontsize=12) axs[0].set_ylim(0, 2*10**5) axs[1].bar(xlabels, rpa_watch_count.values, alpha=0.8, color='orange', zorder=5) axs[1].xaxis.set_tick_params(labelrotation=40) axs[1].set_xticklabels(xlabels, va='top', ha='right', rotation_mode='anchor') axs[1].set_xlabel('Activity', fontsize=12) axs[1].set_ylabel('Number of Records', fontsize=12) axs[1].set_ylim(0, 2*10**5) fig.canvas.layout.height='1200px' fig.subplots_adjust(hspace=0.4) widgets.VBox([fig.canvas]) # - # Surprisingly all the activities have the same sequence in watch or phone with respect to the number of records logged. The range of records logged in the phone range from 150000 to 180000, while the range of records logged in the watch range from 180000 to 190000. We can confirm by this that the watch is recording more data as we saw from the first part. 
# ## Part II: Aggregate Analytics
# Below is an interactive bqplot project to display the aggregated/derived data of the dataset. It allows users to select subject ID (1600-1650), phone vs watch, coordinates (x_accel, y_accel, z_accel, x_gyro, y_gyro, z_gyro), and aggregation type (in total, there are 5 aggregation types: sum, mean, max, min, count). Users can select anything they want from the four dropdowns, and the results will display with a barplot of the data of every activity correspondingly. By this interactive plot, users can learn about the aggregated results of the dataset.

plt.ion()

# Shared selection state for the dropdown callbacks below.  The original
# code assigned plain *locals* inside each callback (e.g. ``ID=Subject_ID``),
# which never reached ``change_aggregation`` -- on a fresh kernel that raised
# NameError.  Declaring the names ``global`` makes each selection visible.
ID = '1600'
device = 'phone'
coordinate = 'x_accel'


@interact(Subject_ID=subject_ids)
def get_subject(Subject_ID):
    """Record the chosen subject id for the aggregation plot."""
    global ID
    ID = Subject_ID


@interact(Device=['phone', 'watch'])
def get_device(Device):
    """Record the chosen device (phone or watch)."""
    global device
    device = Device


@interact(Coordinate=['x_accel', 'y_accel', 'z_accel', 'x_gyro', 'y_gyro', 'z_gyro'])
def get_coordinate(Coordinate):
    """Record the chosen sensor axis."""
    global coordinate
    coordinate = Coordinate


@interact(Aggregate=['sum', 'mean', 'max', 'min', 'count'])
def change_aggregation(Aggregate):
    """Aggregate the selected axis per activity and draw a bqplot bar chart."""
    grouped = get_by_subject_id(ID).get(device).groupby('activity_code')[coordinate]
    # Dispatch on the aggregation name instead of an if-chain; every option
    # in the dropdown is a valid pandas GroupBy reduction method.
    y = getattr(grouped, Aggregate)()
    bin_x_sc = bqplot.OrdinalScale()
    bin_x_ax = bqplot.Axis(scale=bin_x_sc, label='activity code')
    bin_y_sc = bqplot.LinearScale()
    bin_y_ax = bqplot.Axis(scale=bin_y_sc, orientation='vertical', label='value')
    bars = bqplot.Bars(x=y.index, y=y, scales={'x': bin_x_sc, 'y': bin_y_sc})
    fig = bqplot.Figure(marks=[bars], axes=[bin_x_ax, bin_y_ax])
    display(fig)

# By looking at each activity's mean values for the various acceleration axes, some observations regarding the overall motion of
the activity can be made. For an example, using subject 1600: # # 1. **Device**: Phone; **Axis**: _x\_accel_; **Aggregation method**: _mean_<br/> # <p style='margin-left: 2em'>Activity teeth has the highest mean for x_accel which is near 5, and the activity dribbling has the lowest mean which is nearly -4. All the activities which have a negative mean, the means are around -2 or more than that, except for the activity dribbling. &nbsp;Also activity typing has a near to 0 mean.</p> # # 2. **Device**: Watch; **Axis**: _x\_accel_; **Aggregation method**: _mean_<br/> # <p style='margin-left: 2em'>Except for activities sitting and folding polarity of mean of all the activities have changed for the device watch. Walking has the highest x_accel mean, while teeth has the lowest x_accel. Interestingly activity teeth had the highest x_accel for the phone. &nbsp;Activities writing and typing have a near to 0 mean.</p> # # 3. **Device**: Phone; **Axis**: _y\_accel_; **Aggregation method**: _mean_<br/> # <p style='margin-left: 2em'>Mean of y_accel for all the activities is positive. Catching and folding have the highest y_accel mean, while teeth has the lowest y_accel mean.</p> # # 4. **Device**: Watch; **Axis**: _y\_accel_; **Aggregation method**: _mean_<br/> # <p style='margin-left: 2em'>Mean of y_accel for all the activities is negative. Writing has the lowest(highest in magnitude) y_accel mean, interestingly writing had almost null x_accel mean in the watch. 
Drinking has the highest mean (lowest in magnitude)</p> # + plt.ion() fig = plt.figure() fig.suptitle('Scatter Plot of the WISDM Data Statespace Sum') fig.canvas.layout.width = "1000px" fig.canvas.layout.height = "1000px" t = None cb = None cmap = None def update_scatter(subject_id=None, device=None, activity=None, stepsize=None): global t, cb, cmap Subject_ID = subject_id or 1600 Device = device or 'phone' Activity = activity or 'A' StepSize = stepsize or 10 x = get_by_subject_id(Subject_ID).get(Device).groupby('activity_code').get_group(Activity)[['x_accel','y_accel','z_accel','x_gyro','y_gyro','z_gyro']] # ax = Axes3D(fig) ax = fig.add_subplot(111, projection='3d') smooth_x = x[::StepSize] t = ax.scatter(smooth_x['x_accel'],smooth_x['y_accel'],smooth_x['z_accel'], c = plt.cm.jet(np.linspace(0,1,len(smooth_x)))) ax.set_title('Subject #{} `{}` ({}) using {}'.format(Subject_ID, activity_letter_key.get(Activity).capitalize(), Activity, Device.capitalize())) ax.set_xlabel('X-Axis') ax.set_ylabel('Y-Axis') ax.set_zlabel('Z-Axis') fig.canvas.draw() fig.canvas.flush_events() def get_subject(change): update_scatter(subject_id=change.new) def get_device(change): update_scatter(device=change.new) def get_activity(change): update_scatter(activity=change.new) def get_stepsize(change): update_scatter(stepsize=change.new) subject_dd = widgets.Dropdown(options = subject_ids, description='Subject ID:', value='1600') device_dd = widgets.Dropdown(options = ['phone', 'watch'], description='Device Type:', value='phone') activity_dd = widgets.Dropdown(options=activity_name_key, description='Activity:', value='A') stepsize_sl = widgets.IntSlider(min=1, max=20, value=10) subject_dd.observe(get_subject, names='value') device_dd.observe(get_device, names='value') activity_dd.observe(get_activity, names='value') stepsize_sl.observe(get_stepsize, names='value') update_scatter() widgets.VBox([widgets.HBox([subject_dd, device_dd, activity_dd]), widgets.HBox([widgets.Label(value='Steps 
between Points:'), stepsize_sl])]) # - # The color gradient explains the start and the end of a person’s activity for all the 18 activities. # The dark blue color represents the start of the persons activity at the start and as it reaches the end it’s represented by a dark red color. # # After seeing the 2d visualizations we moved forward to visualizing the 3d scatter plots just to see how the movement for different activities in the 3-dimmensional space. # # First, we plotted the normal 3d plots but it was just random scatter and we were unable to differentiate between the start and end of any activity. We determined a different color scale would help perceive this difference. # # Inferences for subject 1600: # * Walking: An outlier on the top left corner of the plot. # * Jogging: An outlier on the top of the plot. # * Standing: More outliers on the right of the plot # * Stairs: Some outliers in the entire plot # # With the color scale applied to these plots, we observed the cumulative sum of the accelerations might produce a more informational pattern for the activities. # As for the outliers, we can only guess. We do not know where they are walking, the surface, or conditions. While walking, the outlier could mean the subject jerked or jumped suddenly. This is an interesting question to be raised! 
# + plt.ion() fig = plt.figure() fig.suptitle('Scatter Plot of the WISDM Data Cumulative Sum over Time') fig.canvas.layout.width = "1000px" fig.canvas.layout.height = "1000px" t = None cb = None cmap = None def update_scatter(subject_id=None, device=None, activity=None, stepsize=None): global t, cb, cmap Subject_ID = subject_id or 1600 Device = device or 'phone' Activity = activity or 'A' StepSize = stepsize or 10 x = get_by_subject_id(Subject_ID).get(Device).groupby('activity_code').get_group(Activity)[['x_accel','y_accel','z_accel','x_gyro','y_gyro','z_gyro']].cumsum(axis = 0) # ax = Axes3D(fig) ax = fig.add_subplot(111, projection='3d') smooth_x = x[::StepSize] t = ax.scatter(smooth_x['x_accel'],smooth_x['y_accel'],smooth_x['z_accel'], c = plt.cm.jet(np.linspace(0,1,len(smooth_x)))) ax.set_title('Subject #{} `{}` ({}) using {}'.format(Subject_ID, activity_letter_key.get(Activity).capitalize(), Activity, Device.capitalize())) ax.set_xlabel('X-Axis') ax.set_ylabel('Y-Axis') ax.set_zlabel('Z-Axis') fig.canvas.draw() fig.canvas.flush_events() def get_subject(change): update_scatter(subject_id=change.new) def get_device(change): update_scatter(device=change.new) def get_activity(change): update_scatter(activity=change.new) def get_stepsize(change): update_scatter(stepsize=change.new) subject_dd = widgets.Dropdown(options = subject_ids, description='Subject ID:', value='1600') device_dd = widgets.Dropdown(options = ['phone', 'watch'], description='Device Type:', value='phone') activity_dd = widgets.Dropdown(options=activity_name_key, description='Activity:', value='A') stepsize_sl = widgets.IntSlider(min=1, max=20, value=10) subject_dd.observe(get_subject, names='value') device_dd.observe(get_device, names='value') activity_dd.observe(get_activity, names='value') stepsize_sl.observe(get_stepsize, names='value') update_scatter() widgets.VBox([widgets.HBox([subject_dd, device_dd, activity_dd]), widgets.HBox([widgets.Label(value='Steps between Points:'), stepsize_sl])]) 
# - # We can find more patterns in the figures of cumulative sums. Compared to simple scatter plots where scatters are messly distributed, scatters in cumulative sum plots show a clear path. In the figure, blue is beginning point, and red is ending point. From this, we can find that in every activity, y values go up, while the x and z values vary (some go up, others go down), depending on different activities. # ## Part III: Adjusted Data # + activities = get_by_subject_id(1600)['phone'].groupby('activity_name') dfs = {activity: activities.get_group(activity)[['x_accel', 'y_accel', 'z_accel']]/20 for activity in ['walking', 'jogging', 'stairs', 'standing']} fig, axs = plt.subplots(4, figsize=(10, 10)) for i, (name, df) in enumerate(dfs.items()): ax = axs[i] adj_df = df.sub(df.mean(axis=0)).cumsum(axis=0) ax.plot(adj_df.z_accel, label='Z Axis (Forward)') ax.plot(adj_df.x_accel, label='X Axis (Side-to-Side)') ax.plot(adj_df.y_accel, label='Y Axis (Up and Down)') if ax.is_last_row(): ax.set_xlabel('Time (50ms per step)') ax.set_ylabel('Meters/0.05 seconds squared') ax.set_title(name.capitalize()) fig.canvas.layout.width = '1200px' fig.canvas.layout.height = '1500px' fig.suptitle('Acceleration with Adjusted Axes over Time for 4 Activities') fig.subplots_adjust(top=0.91, left=0.05, right=0.95, hspace=0.5) fig.legend(['Z Axis (Forward)', 'X Axis (Side-to-Side)', 'Y Axis (Up and Down)'], loc='upper center', ncol=3, bbox_to_anchor=(0.5, 0.97)) # - # The problems discovered in the previous visualizatins prompted further analysis. The second paper, [Activity Recognition using Cell Phone Accelerometers],(http://www.cis.fordham.edu/wisdm/includes/files/sensorKDD-2010.pdf) (Kwaspiz et al.) helped identify where adjustments to the data may assist in creating a more sensible plot. # # The first is the reason for the consistently larger magnitude of the Y-Acceleration, causing the significant growth of the cumulative sum. 
This phenomenon is explained on page 3: # >Note that for most activities the y values have the largest accelerations. This is a consequence of Earth’s gravitational pull, which causes the accelerometer to measure a value of 9.8 m/s2 in the direction of the Earth’s center. For all activities except sitting this direction corresponds to the y axis # # Additionally, the paper depicts a graphic of the directionality of the movement with respect to the person. Since the phone is in the subject's pocket, all upright activities (walking, jogging, standing, and stairs) have the Z-axis as the forward vector, and the X axis as the side to side. # # Where subtracting __gravitation__ (at $9.807 \frac{m}{s^2}$, then dividing by 20 to match the 20Hz recording rate) would seem to make the most sense, it doesn't work as well as hoped. Instead, the overall _mean_ is is subtracted from each Y-Acceleration value prior to summation, using the device's frame-of-reference. Due to Y being not __necessarily__ downward, this subtraction isn't a perfectly sound assumption. Applying instantaneous rotation using gyroscopic data was attempted, but did not help to correct for any deviations, since the original orientation of the device is not known (especially for the watch). In addition, in a real-time setting, this would be untenable. # # The resulting visualization shows the plotted path of the summed acceleration of each subject's device for each activity over time. In addition to the options to choose the subject, device, and activity, the slider will follow the path over a predefined time range. Sensible patterns for the activity in question emerge from this view-resolution, not easily apparent in previous visualizations. This indicates that manual course-correction may be possible with help from the visualization. 
# +
plt.ioff()
fig = plt.figure()
fig.canvas.layout.width = '1600px'
fig.canvas.layout.height = '1200px'
class AdjustedPlot():
    """Interactive 3-D view of the gravity-adjusted cumulative acceleration.

    Encapsulates one matplotlib figure plus ipywidgets controls: dropdowns
    for subject/device/activity rebuild the data and redraw; a slider pans
    the axes limits over a fixed-size window (``view_span``) of the path.
    """
    def __init__(self, fig_):
        self.fig = fig_
        self.fig.suptitle('Adjusted Cumulative Instantateous Accelerations over Time')
        # Data Initialization
        self.view_span = 500  # number of samples visible per slider position
        self.subject_id = '1600'
        self.device = 'phone'
        self.activity_code = 'A'
        self.output = widgets.Output()
        self.update_data()
        # Initial Plot -- note the axis swap: the device's Z (forward) is
        # drawn on the plot's Y axis and the device's Y on the plot's Z.
        self.ax = self.fig.add_subplot(111, projection='3d')
        self.scatter = self.ax.scatter3D(self.df.x_accel, self.df.z_accel, self.df.y_accel, c=plt.cm.jet(np.linspace(0,1,len(self.df))))
        self.lines = self.ax.plot(self.df.x_accel, self.df.z_accel, self.df.y_accel, color='black', alpha=0.5)
        self.ax.set_xlabel('X-Axis')
        self.ax.set_ylabel('Adjusted Y-Axis (originally Z)')
        self.ax.set_zlabel('Adjusted Z-Axis (originally Y)')
        self.update_view()
        # Widgets
        self.subject_dropdown = widgets.Dropdown(options=subject_ids, value=self.subject_id, description='Subject ID:')
        self.device_dropdown = widgets.Dropdown(options=['phone', 'watch'], value=self.device, description='Device:')
        self.activity_dropdown = widgets.Dropdown(options=activity_name_key, value=self.activity_code, description='Activity:')
        self.view_slider = widgets.IntSlider(min=0, max=self.df.shape[0]-self.view_span, value=1.0, description='View Frame:')
        # Observers -- each on_change_* returns a fresh closure over self.
        self.subject_dropdown.observe(self.on_change_subject(), names='value')
        self.device_dropdown.observe(self.on_change_device(), names='value')
        self.activity_dropdown.observe(self.on_change_activity(), names='value')
        self.view_slider.observe(self.on_change_view(), names='value')
    def update_data(self, subject_id=None, device=None, activity_code=None):
        """Rebuild self.df for the current selections.

        Divides by 20 to match the 20 Hz sample rate, subtracts the mean of
        y_accel (approximate gravity removal), then takes the cumulative sum.
        """
        if subject_id or not self.subject_id: self.subject_id = subject_id or '1600'
        if device or not self.device: self.device = device or 'phone'
        if activity_code or not self.activity_code: self.activity_code = activity_code or 'A'
        self.subject_dfs = get_by_subject_id(self.subject_id).copy()
        self.df = self.subject_dfs[self.device]
        self.df = self.df[self.df['activity_code'] == self.activity_code].reset_index(drop=True)[['x_accel', 'y_accel', 'z_accel']] / 20
        self.df.y_accel = self.df.y_accel.sub(self.df.y_accel.mean())
        self.df = self.df.cumsum(axis=0)
    def update_canvas(self):
        """Force a redraw of the figure canvas."""
        self.ax.relim()
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
    def update_plot(self):
        """Replace the line/scatter artists with ones for the new self.df."""
        # https://stackoverflow.com/questions/41602588/matplotlib-3d-scatter-animations
        self.lines[0].set_data(self.df.x_accel, self.df.z_accel)
        self.lines[0].set_3d_properties(self.df.y_accel)
        self.scatter.remove()
        self.scatter = self.ax.scatter3D(self.df.x_accel, self.df.z_accel, self.df.y_accel, c=plt.cm.jet(np.linspace(0,1,len(self.df))))
        # Re-bound the pan slider to the new data length and reset it.
        self.view_slider.max = self.df.shape[0] - self.view_span
        self.view_slider.value = 1
        self.ax.relim()
        self.ax.autoscale_view()
    def update_view(self, start=0):
        """Pan the axes limits to the window [start, start + view_span)."""
        part = self.df.iloc[start: start + self.view_span]
        self.ax.set_xlim((part.x_accel.min() - 3, part.x_accel.max() + 3))
        self.ax.set_ylim((part.z_accel.min() - 3, part.z_accel.max() + 3))
        self.ax.set_zlim((part.y_accel.min() - 3, part.y_accel.max() + 3))
    def on_change_subject(self):
        """Return an observer callback that reacts to a new subject id."""
        def callback(change):
            self.update_view()
            self.update_data(subject_id=change.new)
            self.update_plot()
            self.update_canvas()
            self.update_view()
        return callback
    def on_change_device(self):
        """Return an observer callback that reacts to a new device choice."""
        def callback(change):
            self.update_view()
            self.update_data(device=change.new)
            self.update_plot()
            self.update_canvas()
            self.update_view()
        return callback
    def on_change_activity(self):
        """Return an observer callback that reacts to a new activity code."""
        def callback(change):
            self.update_view()
            self.update_data(activity_code = change.new)
            self.update_plot()
            self.update_canvas()
            self.update_view()
        return callback
    def on_change_view(self):
        """Return an observer callback that pans to the slider position."""
        def callback(change):
            self.update_view(change.new)
            self.update_canvas()
        return callback
    def _p(self, *args):
        # Debug helper: print into the widget Output area instead of stdout.
        with self.output:
            print(*args)
ajp = AdjustedPlot(fig)
plt.ion()
widgets.HBox([widgets.VBox([widgets.HBox([ajp.subject_dropdown, ajp.device_dropdown, ajp.activity_dropdown]), ajp.view_slider, fig.canvas])])
# -
# Thanks for Viewing!
notebooks/IS590DV_Fa19_Final_Presentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Yw5yHQYjc8nl"
# # To Fish or Not to Fish
# > "A PyTorch tutorial for dealing with Custom Image Data"
#
# - toc: true
# - branch: master
# - badges: true
# - comments: false
# - categories: [PyTorch, DataLoaders, Image Manipulation]
# - image: images/tofish.jpg
# - hide: false
# - search_exclude: true

# + [markdown] id="Ks9jga4qA1x0"
# ## Importing Data from Kaggle to Colab
#
# I collected the [Fish Dataset](https://www.kaggle.com/crowww/a-large-scale-fish-dataset)
# from Kaggle into a Google Colab Notebook. I have used the steps that were suggested
# in this [link](https://www.kaggle.com/general/156610) for downloading data into a
# Colab environment.

# + id="WQVjLKsxAyu9"
# Mount Google Drive so data can persist across Colab sessions.
from google.colab import drive
drive.mount('/content/gdrive')

# + id="Gl2Z7JQoA6MQ"
# Upload kaggle.json (the Kaggle API token) from the local machine.
from google.colab import files
files.upload()

# + id="r3u3RNNrBI4P"
# Install the API token where the Kaggle CLI expects it.
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/

# + id="sQuAziEyBLrw"
# Restrict permissions so the Kaggle CLI does not refuse a world-readable token.
# !chmod 600 /root/.kaggle/kaggle.json

# + id="ponDVDMgBNt7"
# !pwd

# + id="ScuIWsz6BPBR"
# !kaggle datasets download -d crowww/a-large-scale-fish-dataset

# + id="zDmBqkOzBQsq"
# !unzip a-large-scale-fish-dataset.zip

# + [markdown] id="Qy6ABjRwBWN3"
# ## Libraries

# + id="7RQz4_OVBUnQ"
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

import sklearn
from sklearn.model_selection import train_test_split

import numpy as np
import pandas as pd
import random
import os
import matplotlib.pyplot as plt
import PIL
from PIL import Image
import time
import seaborn as sns
import glob
from pathlib import Path

# Seed both torch and numpy so shuffles/augmentations are reproducible.
torch.manual_seed(1)
np.random.seed(1)

# + [markdown] id="eOnZEaLSBlBg"
# ## Image Analysis

# + [markdown] id="bietTM_uBo7u"
# ### Step-1: Defining Folder Path
#
# 1. We select the folder which contains images of Fishes in PNG format.
# 2. We separate actual images from segmented images.
# 3. We save the classes in an integer format (can be used later for Data Classification).

# + id="h_uF7K1sBi4e"
data_path = Path.cwd() / 'Fish_Dataset/Fish_Dataset'

# Path for all the files in a 'png' format.
image_path = list(data_path.glob('**/*.png'))

# Separate segmented (ground-truth "GT" masks) from non-segmented images.
# The class name is the grandparent folder of each file (img.parts[-3]).
non_segmented_images = [img for img in image_path if 'GT' not in str(img)]
labels_non_segment = [img.parts[-3] for img in non_segmented_images]

segmented_images = [img for img in image_path if 'GT' in str(img)]
labels_segment = [img.parts[-3] for img in segmented_images]

# BUGFIX: list(set(...)) yields a nondeterministic order between interpreter
# sessions, so the class -> int mapping could change run to run; sort instead.
classes = sorted(set(labels_segment))

# Convert string labels to int.
int_classes = {fish: i for i, fish in enumerate(classes)}
labels = [int_classes[label] for label in labels_non_segment]

# + [markdown] id="h3tjvgK2ChX1"
# ### Step-2: Opening an Image

# + id="K3eTgAs5Bwaa"
# Open a sample image.
# BUGFIX: the original cell indexed `image_data`, a DataFrame that is only built
# several cells below, so a clean top-to-bottom run raised NameError. Use the
# path/label lists that already exist at this point instead.
print(f"Fish Class:{classes[labels[200]]}")
img1 = Image.open(non_segmented_images[200])
plt.imshow(img1)

# + [markdown] id="hlGSNW2lFdKV"
# ## Image Augmentation

# + id="2wSOFVFyCane"
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as T

torch.manual_seed(0)


def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs):
    """Plot a grid of transformed images, optionally prefixed by the original.

    Official PyTorch source:
    https://pytorch.org/vision/stable/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py

    Parameters
    ----------
    imgs : list (or list of lists) of PIL images — one inner list per grid row.
    with_orig : bool — if True, prepend the module-level `orig_img` to each row.
    row_title : list of str, optional — y-axis label for each row.
    **imshow_kwargs : forwarded to `Axes.imshow`.
    """
    if not isinstance(imgs[0], list):
        # Make a 2d grid even if there's just 1 row
        imgs = [imgs]

    num_rows = len(imgs)
    num_cols = len(imgs[0]) + with_orig
    fig, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
    for row_idx, row in enumerate(imgs):
        # NOTE: relies on the module-level global `orig_img` set by the caller.
        row = [orig_img] + row if with_orig else row
        for col_idx, img in enumerate(row):
            ax = axs[row_idx, col_idx]
            ax.imshow(np.asarray(img), **imshow_kwargs)
            ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])

    if with_orig:
        axs[0, 0].set(title='Original image')
        axs[0, 0].title.set_size(8)
    if row_title is not None:
        for row_idx in range(num_rows):
            axs[row_idx, 0].set(ylabel=row_title[row_idx])

    plt.show()


# + [markdown] id="i5KLkABPHA_I"
# ### Padding

# + id="Of9DlIpBFuEF"
orig_img = img1
padded_imgs = [T.Pad(padding=padding)(orig_img) for padding in (10, 100, 500)]
plot(padded_imgs)

# + [markdown] id="igIY_viDHDO7"
# ### Resizing

# + id="VCd7X-qlF1vx"
resized_imgs = [T.Resize(size=s)(orig_img)
                for s in ((100, 100), (200, 200), (280, 280), (1000, 1000))]

# + id="MM7qvzz4U-pZ"
# Observe the x and y scale
plt.imshow(resized_imgs[0])

# + id="tUrXCPk0U-Yk"
# The x and y scale is in the range of 1000
plt.imshow(resized_imgs[-1])

# + [markdown] id="gTl93OL0VSnz"
# ### Adding Jitters

# + id="pnnK_uIAHP8i"
jitter = T.ColorJitter(brightness=.5, hue=.3)
jitted_imgs = [jitter(orig_img) for _ in range(3)]
plot(jitted_imgs)

# + [markdown] id="tl5RpzfTVWFA"
# ### Flipping

# + id="PGHSfw_PHotj"
horiflip = T.RandomHorizontalFlip(p=0.5)
hor_flip_img = [horiflip(orig_img) for _ in range(4)]
plot(hor_flip_img)

# + [markdown] id="cLcuCF1VV0mG"
# ## Data Preparation for Modeling
#
# - Firstly, create a dataframe with 'Image Path' and the 'Image Label' against the 'Image Path'

# + id="Spbi3aB4JvCz"
image_data = pd.DataFrame({'Path': non_segmented_images,
                           'labels': labels})

# + [markdown] id="JHL7oZQCWzQs"
# - Create a Custom class to upload the data.
# - The class should be able to implement Transformation on an individual image.

# + id="jC4aulgxWced"
class FishDataset(Dataset):
    """Dataset that lazily loads one fish image from disk per __getitem__.

    Parameters
    ----------
    images : pandas.Series of pathlib.Path — image file paths.
    labels : pandas.Series of int — integer class label aligned with `images`.
    transform : callable, optional — applied to the opened PIL image.
    """

    def __init__(self, images, labels, transform=None):
        self.images = images
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # .iloc is positional, so the loaders work even after train_test_split
        # leaves the Series with non-contiguous indices.
        img = Image.open(self.images.iloc[idx])
        if self.transform:
            img = self.transform(img)
        label = self.labels.iloc[idx]
        return img, label


# + [markdown] id="bsSEPwdFXTfB"
# - Now Apply Transformation:
#   - When augmenting an Image, we do not want to apply any randomness to unseen or
#     Test data. Thus, we always go for transforming Train and Test differently.

# + id="qNZl6IiIXGKK"
##########################
### FISH DATASET
##########################

train_transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

test_transforms = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# + [markdown] id="V12v2QXzXzet"
# ### Prepare Data Loaders

# + id="ml6cKdQFXYjX"
def get_loaders(train, train_labels, val, val_labels, test, test_labels,
                batch_size, num_workers, train_transform, test_transform):
    """Return the Train, Validation and Test DataLoaders.

    BUGFIX: the original body ignored the `test_transform` parameter and read
    the module-level `test_transforms` global instead; the parameter is now
    honoured for both the validation and the test datasets.
    """
    train_ds = FishDataset(images=train, labels=train_labels,
                           transform=train_transform)
    val_ds = FishDataset(images=val, labels=val_labels,
                         transform=test_transform)
    test_ds = FishDataset(images=test, labels=test_labels,
                          transform=test_transform)

    # Only the training set is shuffled; evaluation order stays stable.
    train_loader = DataLoader(train_ds, batch_size=batch_size,
                              num_workers=num_workers, shuffle=True)
    val_loader = DataLoader(val_ds, batch_size=batch_size,
                            num_workers=num_workers, shuffle=False)
    test_loader = DataLoader(test_ds, batch_size=batch_size,
                             num_workers=num_workers, shuffle=False)

    return train_loader, val_loader, test_loader


# + [markdown] id="Lz0-W5UnX9iX"
# ## Create Train, Test and Validation Sets

# + id="IRVBAtPpYBjb"
RANDOM_SEED = 123
BATCH_SIZE = 128
NUM_EPOCHS = 10
WORKERS = 2  # Suggested for Colab
# BUGFIX: 'cuda:1' addressed the *second* GPU, which does not exist on a
# single-GPU Colab runtime; plain 'cuda' selects the default device.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# + id="guJ9NRmkYVSU"
# Perform Train/Val/Test Split (80/20, then 80/20 again on the train part).
# random_state wires in RANDOM_SEED — previously defined but never used — so
# the splits are reproducible across runs.
train, test, train_labels, test_labels = train_test_split(
    image_data.Path, image_data.labels, test_size=0.2, shuffle=True,
    random_state=RANDOM_SEED)
train, val, train_labels, val_labels = train_test_split(
    train, train_labels, test_size=0.2, shuffle=True,
    random_state=RANDOM_SEED)

# + id="CeC9gJwHX1xD"
## Create Data Loaders
train_loader, val_loader, test_loader = get_loaders(
    train, train_labels, val, val_labels, test, test_labels,
    BATCH_SIZE, WORKERS, train_transform, test_transforms)

# + [markdown] id="4XUbOYK2Ya-B"
# - Lets observe the Shape of Train/Val/Test Data

# + id="zSfRf98FYIuk"
#collapse_show
## Training Data
for (image, label) in train_loader:
    print(image.shape)
    print(label.shape)
    break

# + id="wKwdYt8EZswF"
#collapse_show
## Validation Data
for (image, label) in val_loader:
    print(image.shape)
    print(label.shape)
    break

# + id="UCjcSw1UZzHe"
#collapse_show
# Test Data
for (image, label) in test_loader:
    print(image.shape)
    print(label.shape)
    break

# + [markdown] id="i8biQQfLZ4gM"
# - We observe that the dimensions of Train, Test and Validation are consistent.
# - We were able to transform images in the required format.
# - Each input image consisted of 3 channels and each channel was (64,64) in size.
_notebooks/2021-06-11-Image-Loading.ipynb