text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
# COVID-19 exploratory data analysis ver. A.L. 20200512 **Slightly modified from Greg Rafferty's** https://github.com/raffg/covid-19; <br>see also his dashboard to monitor the COVID-19 pandemic https://covid-19-raffg.herokuapp.com and his [portfolio](https://github.com/raffg/portfolio/blob/master/README.md) ### Uses data provided by the [Johns Hopkins Center for Systems Science and Engineering](https://github.com/CSSEGISandData/COVID-19) Requires: - plotly: https://plotly.com/python (`conda install plotly`) - cufflinks: https://plotly.com/python/v3/ipython-notebooks/cufflinks (`pip install cufflinks --upgrade`) ## Learning objectives - How to read (updated) data from the web - How to organize and analyse data using `pandas` - How to make interactive graphs using `plotly` ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import glob import re from datetime import date, timedelta import io import requests import plotly print('plotly:', plotly.__version__) # Standard plotly imports import plotly.graph_objects as go from plotly.offline import iplot, init_notebook_mode # Using plotly + cufflinks in offline mode import cufflinks print('cufflinks:', cufflinks.__version__) cufflinks.go_offline(connected=True) init_notebook_mode(connected=True) # # Load files from folder # path = 'COVID-19/csse_covid_19_data/csse_covid_19_daily_reports' # all_files = glob.glob(path + "/*.csv") # files = [] # for filename in all_files: # file = re.search(r'([0-9]{2}\-[0-9]{2}\-[0-9]{4})', filename)[0] # df = pd.read_csv(filename, index_col=None, header=0) # df['date'] = pd.to_datetime(file) # files.append(df) # df = pd.concat(files, axis=0, ignore_index=True, sort=False) ``` ``` # Load files from web file_date = date(2020, 1, 22) dates = [] while file_date <= date.today(): dates.append(file_date) file_date += timedelta(days=1) files = [] for file in dates: file = file.strftime("%m-%d-%Y") print(file) url = 
r'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{}.csv'.format(file) raw_string = requests.get(url).content dff = pd.read_csv(io.StringIO(raw_string.decode('utf-8'))) dff['date'] = pd.to_datetime(file) dff.rename(columns={'Country_Region': 'Country/Region'}, inplace=True) files.append(dff) dff = pd.concat(files, axis=0, ignore_index=True, sort=False) ``` ``` dff.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 165400 entries, 0 to 165399 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Province/State 4358 non-null object 1 Country/Region 165400 non-null object 2 Last Update 7617 non-null object 3 Confirmed 165381 non-null float64 4 Deaths 164959 non-null float64 5 Recovered 165012 non-null float64 6 date 165400 non-null datetime64[ns] 7 Latitude 4799 non-null float64 8 Longitude 4799 non-null float64 9 FIPS 143674 non-null float64 10 Admin2 144198 non-null object 11 Province_State 148664 non-null object 12 Last_Update 157783 non-null object 13 Lat 155291 non-null float64 14 Long_ 155291 non-null float64 15 Active 157783 non-null float64 16 Combined_Key 157783 non-null object 17 404: Not Found 0 non-null object dtypes: datetime64[ns](1), float64(9), object(8) memory usage: 22.7+ MB ``` ``` # Save to disk (overwrite previous version) #dff.to_csv('./data/csse_covid_19_daily_reports.csv', encoding='utf-8', index=False) tmp = pd.read_csv('./data/csse_covid_19_daily_reports.csv') df = tmp # Rename countries with duplicate naming conventions df['Country/Region'].replace('Mainland China', 'China', inplace=True) df['Country/Region'].replace('Hong Kong SAR', 'Hong Kong', inplace=True) df['Country/Region'].replace(' Azerbaijan', 'Azerbaijan', inplace=True) df['Country/Region'].replace('Holy See', 'Vatican City', inplace=True) df['Country/Region'].replace('Iran (Islamic Republic of)', 'Iran', inplace=True) df['Country/Region'].replace('Taiwan*', 'Taiwan', 
inplace=True) df['Country/Region'].replace('Korea, South', 'South Korea', inplace=True) df['Country/Region'].replace('Viet Nam', 'Vietnam', inplace=True) df['Country/Region'].replace('Macao SAR', 'Macau', inplace=True) df['Country/Region'].replace('Russian Federation', 'Russia', inplace=True) df['Country/Region'].replace('Republic of Moldova', 'Moldova', inplace=True) df['Country/Region'].replace('Czechia', 'Czech Republic', inplace=True) df['Country/Region'].replace('Congo (Kinshasa)', 'Congo', inplace=True) df['Country/Region'].replace('Northern Ireland', 'United Kingdom', inplace=True) df['Country/Region'].replace('Republic of Korea', 'North Korea', inplace=True) df['Country/Region'].replace('Congo (Brazzaville)', 'Congo', inplace=True) df['Country/Region'].replace('Taipei and environs', 'Taiwan', inplace=True) df['Country/Region'].replace('Others', 'Cruise Ship', inplace=True) df['Province/State'].replace('Cruise Ship', 'Diamond Princess cruise ship', inplace=True) df['Province/State'].replace('From Diamond Princess', 'Diamond Princess cruise ship', inplace=True) # Replace old reporting standards df['Province/State'].replace('Chicago', 'Illinois', inplace=True) df['Province/State'].replace('Chicago, IL', 'Illinois', inplace=True) df['Province/State'].replace('Cook County, IL', 'Illinois', inplace=True) df['Province/State'].replace('Boston, MA', 'Massachusetts', inplace=True) df['Province/State'].replace(' Norfolk County, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Suffolk County, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Middlesex County, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Norwell County, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Plymouth County, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Norfolk County, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Berkshire County, MA', 'Massachusetts', inplace=True) 
df['Province/State'].replace('Unknown Location, MA', 'Massachusetts', inplace=True) df['Province/State'].replace('Los Angeles, CA', 'California', inplace=True) df['Province/State'].replace('Orange, CA', 'California', inplace=True) df['Province/State'].replace('Santa Clara, CA', 'California', inplace=True) df['Province/State'].replace('San Benito, CA', 'California', inplace=True) df['Province/State'].replace('Humboldt County, CA', 'California', inplace=True) df['Province/State'].replace('Sacramento County, CA', 'California', inplace=True) df['Province/State'].replace('Travis, CA (From Diamond Princess)', 'California', inplace=True) df['Province/State'].replace('Placer County, CA', 'California', inplace=True) df['Province/State'].replace('San Mateo, CA', 'California', inplace=True) df['Province/State'].replace('Sonoma County, CA', 'California', inplace=True) df['Province/State'].replace('Berkeley, CA', 'California', inplace=True) df['Province/State'].replace('Orange County, CA', 'California', inplace=True) df['Province/State'].replace('Contra Costa County, CA', 'California', inplace=True) df['Province/State'].replace('San Francisco County, CA', 'California', inplace=True) df['Province/State'].replace('Yolo County, CA', 'California', inplace=True) df['Province/State'].replace('Santa Clara County, CA', 'California', inplace=True) df['Province/State'].replace('San Diego County, CA', 'California', inplace=True) df['Province/State'].replace('Travis, CA', 'California', inplace=True) df['Province/State'].replace('Alameda County, CA', 'California', inplace=True) df['Province/State'].replace('Madera County, CA', 'California', inplace=True) df['Province/State'].replace('Santa Cruz County, CA', 'California', inplace=True) df['Province/State'].replace('Fresno County, CA', 'California', inplace=True) df['Province/State'].replace('Riverside County, CA', 'California', inplace=True) df['Province/State'].replace('Shasta County, CA', 'California', inplace=True) 
df['Province/State'].replace('Seattle, WA', 'Washington', inplace=True) df['Province/State'].replace('Snohomish County, WA', 'Washington', inplace=True) df['Province/State'].replace('King County, WA', 'Washington', inplace=True) df['Province/State'].replace('Unassigned Location, WA', 'Washington', inplace=True) df['Province/State'].replace('Clark County, WA', 'Washington', inplace=True) df['Province/State'].replace('Jefferson County, WA', 'Washington', inplace=True) df['Province/State'].replace('Pierce County, WA', 'Washington', inplace=True) df['Province/State'].replace('Kittitas County, WA', 'Washington', inplace=True) df['Province/State'].replace('Grant County, WA', 'Washington', inplace=True) df['Province/State'].replace('Spokane County, WA', 'Washington', inplace=True) df['Province/State'].replace('Tempe, AZ', 'Arizona', inplace=True) df['Province/State'].replace('Maricopa County, AZ', 'Arizona', inplace=True) df['Province/State'].replace('Pinal County, AZ', 'Arizona', inplace=True) df['Province/State'].replace('Madison, WI', 'Wisconsin', inplace=True) df['Province/State'].replace('San Antonio, TX', 'Texas', inplace=True) df['Province/State'].replace('Lackland, TX', 'Texas', inplace=True) df['Province/State'].replace('Lackland, TX (From Diamond Princess)', 'Texas', inplace=True) df['Province/State'].replace('Harris County, TX', 'Texas', inplace=True) df['Province/State'].replace('Fort Bend County, TX', 'Texas', inplace=True) df['Province/State'].replace('Montgomery County, TX', 'Texas', inplace=True) df['Province/State'].replace('Collin County, TX', 'Texas', inplace=True) df['Province/State'].replace('Ashland, NE', 'Nebraska', inplace=True) df['Province/State'].replace('Omaha, NE (From Diamond Princess)', 'Nebraska', inplace=True) df['Province/State'].replace('Douglas County, NE', 'Nebraska', inplace=True) df['Province/State'].replace('Portland, OR', 'Oregon', inplace=True) df['Province/State'].replace('Umatilla, OR', 'Oregon', inplace=True) 
df['Province/State'].replace('Klamath County, OR', 'Oregon', inplace=True) df['Province/State'].replace('Douglas County, OR', 'Oregon', inplace=True) df['Province/State'].replace('Marion County, OR', 'Oregon', inplace=True) df['Province/State'].replace('Jackson County, OR ', 'Oregon', inplace=True) df['Province/State'].replace('Washington County, OR', 'Oregon', inplace=True) df['Province/State'].replace('Providence, RI', 'Rhode Island', inplace=True) df['Province/State'].replace('Providence County, RI', 'Rhode Island', inplace=True) df['Province/State'].replace('Grafton County, NH', 'New Hampshire', inplace=True) df['Province/State'].replace('Rockingham County, NH', 'New Hampshire', inplace=True) df['Province/State'].replace('Hillsborough, FL', 'Florida', inplace=True) df['Province/State'].replace('Sarasota, FL', 'Florida', inplace=True) df['Province/State'].replace('Santa Rosa County, FL', 'Florida', inplace=True) df['Province/State'].replace('Broward County, FL', 'Florida', inplace=True) df['Province/State'].replace('Lee County, FL', 'Florida', inplace=True) df['Province/State'].replace('Volusia County, FL', 'Florida', inplace=True) df['Province/State'].replace('Manatee County, FL', 'Florida', inplace=True) df['Province/State'].replace('Okaloosa County, FL', 'Florida', inplace=True) df['Province/State'].replace('Charlotte County, FL', 'Florida', inplace=True) df['Province/State'].replace('New York City, NY', 'New York', inplace=True) df['Province/State'].replace('Westchester County, NY', 'New York', inplace=True) df['Province/State'].replace('Queens County, NY', 'New York', inplace=True) df['Province/State'].replace('New York County, NY', 'New York', inplace=True) df['Province/State'].replace('Nassau, NY', 'New York', inplace=True) df['Province/State'].replace('Nassau County, NY', 'New York', inplace=True) df['Province/State'].replace('Rockland County, NY', 'New York', inplace=True) df['Province/State'].replace('Saratoga County, NY', 'New York', inplace=True) 
df['Province/State'].replace('Suffolk County, NY', 'New York', inplace=True) df['Province/State'].replace('Ulster County, NY', 'New York', inplace=True) df['Province/State'].replace('Fulton County, GA', 'Georgia', inplace=True) df['Province/State'].replace('Floyd County, GA', 'Georgia', inplace=True) df['Province/State'].replace('Polk County, GA', 'Georgia', inplace=True) df['Province/State'].replace('Cherokee County, GA', 'Georgia', inplace=True) df['Province/State'].replace('Cobb County, GA', 'Georgia', inplace=True) df['Province/State'].replace('Wake County, NC', 'North Carolina', inplace=True) df['Province/State'].replace('Chatham County, NC', 'North Carolina', inplace=True) df['Province/State'].replace('Bergen County, NJ', 'New Jersey', inplace=True) df['Province/State'].replace('Hudson County, NJ', 'New Jersey', inplace=True) df['Province/State'].replace('Clark County, NV', 'Nevada', inplace=True) df['Province/State'].replace('Washoe County, NV', 'Nevada', inplace=True) df['Province/State'].replace('Williamson County, TN', 'Tennessee', inplace=True) df['Province/State'].replace('Davidson County, TN', 'Tennessee', inplace=True) df['Province/State'].replace('Shelby County, TN', 'Tennessee', inplace=True) df['Province/State'].replace('Montgomery County, MD', 'Maryland', inplace=True) df['Province/State'].replace('Harford County, MD', 'Maryland', inplace=True) df['Province/State'].replace('Denver County, CO', 'Colorado', inplace=True) df['Province/State'].replace('Summit County, CO', 'Colorado', inplace=True) df['Province/State'].replace('Douglas County, CO', 'Colorado', inplace=True) df['Province/State'].replace('El Paso County, CO', 'Colorado', inplace=True) df['Province/State'].replace('Delaware County, PA', 'Pennsylvania', inplace=True) df['Province/State'].replace('Wayne County, PA', 'Pennsylvania', inplace=True) df['Province/State'].replace('Montgomery County, PA', 'Pennsylvania', inplace=True) df['Province/State'].replace('Fayette County, KY', 'Kentucky', 
inplace=True) df['Province/State'].replace('Jefferson County, KY', 'Kentucky', inplace=True) df['Province/State'].replace('Harrison County, KY', 'Kentucky', inplace=True) df['Province/State'].replace('Marion County, IN', 'Indiana', inplace=True) df['Province/State'].replace('Hendricks County, IN', 'Indiana', inplace=True) df['Province/State'].replace('Ramsey County, MN', 'Minnesota', inplace=True) df['Province/State'].replace('Carver County, MN', 'Minnesota', inplace=True) df['Province/State'].replace('Fairfield County, CT', 'Connecticut', inplace=True) df['Province/State'].replace('Charleston County, SC', 'South Carolina', inplace=True) df['Province/State'].replace('Spartanburg County, SC', 'South Carolina', inplace=True) df['Province/State'].replace('Kershaw County, SC', 'South Carolina', inplace=True) df['Province/State'].replace('Davis County, UT', 'Utah', inplace=True) df['Province/State'].replace('Honolulu County, HI', 'Hawaii', inplace=True) df['Province/State'].replace('Tulsa County, OK', 'Oklahoma', inplace=True) df['Province/State'].replace('Fairfax County, VA', 'Virginia', inplace=True) df['Province/State'].replace('St. 
Louis County, MO', 'Missouri', inplace=True) df['Province/State'].replace('Unassigned Location, VT', 'Vermont', inplace=True) df['Province/State'].replace('Bennington County, VT', 'Vermont', inplace=True) df['Province/State'].replace('Johnson County, IA', 'Iowa', inplace=True) df['Province/State'].replace('Jefferson Parish, LA', 'Louisiana', inplace=True) df['Province/State'].replace('Johnson County, KS', 'Kansas', inplace=True) df['Province/State'].replace('Washington, D.C.', 'District of Columbia', inplace=True) # Interpolate values for missing South Korea data on March 11 # (we skip this, but see the original https://github.com/raffg/covid-19/blob/master/eda.ipynb) # South Korea data on March 10 seems to be mislabled as North Korea df.loc[(df['Country/Region'] == 'North Korea') & (df['date'] == '03-10-2020'), 'Country/Region'] = 'South Korea' df.info() df # Re-order the columns for readability df = df[['date', 'Country/Region', 'Province/State', 'Confirmed', 'Deaths', 'Recovered', 'Latitude', 'Longitude']] # Fill missing values as 0; create Active cases column df['Confirmed'] = df['Confirmed'].fillna(0).astype(int) df['Deaths'] = df['Deaths'].fillna(0).astype(int) df['Recovered'] = df['Recovered'].fillna(0).astype(int) df['Active'] = df['Confirmed'] - (df['Deaths'] + df['Recovered']) # Replace missing values for latitude and longitude df['Latitude'] = df['Latitude'].fillna(df.groupby('Province/State')['Latitude'].transform('mean')) df['Longitude'] = df['Longitude'].fillna(df.groupby('Province/State')['Longitude'].transform('mean')) df.info() n_reg = len(df['Country/Region'].unique()) print('Number of unique Country/Region:', n_reg) df[df['Country/Region'] == 'US'].groupby(['date', 'Province/State'])[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum() df[df['Country/Region'] == 'US'].groupby('date')[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum() # fatality rate '{:.2f}%'.format(100 * df[df['date'] == df['date'].iloc[-1]]['Deaths'].sum() / df[df['date'] 
== df['date'].iloc[-1]]['Confirmed'].sum()) fig = go.Figure([go.Scatter(x=df[df['Country/Region'] == 'US'].groupby('date')['date'].first(), y=df[df['Country/Region'] == 'US'].groupby('date')['Active'].sum())]) fig.update_layout( title="US: Active COVID-19", xaxis_title="Date", yaxis_title="Active infected", font=dict( family="Courier New, monospace", size=16, color="#7f7f7f" ) ) fig.show() geo_us = df[(df['date'] == '2020-03-22') & (df['Country/Region'] == 'US')].groupby('Province/State', as_index=False).agg({'Longitude': 'mean', 'Latitude': 'mean'}) temp2 = pd.read_csv('./data/csse_covid_19_daily_reports.csv') df4 = temp2[temp2['Country/Region'] == 'US'].groupby('Province/State', as_index=False).agg({'Confirmed': 'sum'}) df4 = df4.merge(geo_us, left_on='Province/State', right_on='Province/State') fig = go.Figure(data=go.Scattergeo( lon = df4['Longitude'], lat = df4['Latitude'], text = df4['Province/State'] + ', ' + ': ' + df4['Confirmed'].astype(str), mode = 'markers', marker_size = (200 * df4['Confirmed'] / df4['Confirmed'].max()), marker = dict(reversescale = False, autocolorscale = False, symbol = 'circle', line = dict(width=1, color='rgba(102, 102, 102)'), colorscale = 'Reds', cmin = 0, color = df4['Confirmed'], cmax = df4['Confirmed'].max(), colorbar_title="Confirmed Cases"))) fig.update_layout(title = 'Number of cumulative confirmed cases in the US by state ', geo=dict(scope='usa', projection_type='albers usa', showland = True, landcolor = "rgb(100, 125, 100)", showocean = True, oceancolor = "rgb(150, 150, 250)", showcountries=True, showsubunits=True, showlakes=True,)) fig.show() eu = ['Albania', 'Andorra', 'Armenia', 'Austria', 'Azerbaijan', 'Belarus', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Kazakhstan', 'Kosovo', 'Latvia', 'Liechtenstein', 'Lithuania', 'Luxembourg', 'Malta', 'Moldova', 
'Monaco', 'Montenegro', 'Netherlands', 'North Macedonia', 'Norway', 'Poland', 'Portugal', 'Romania', 'Russia', 'San Marino', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'Turkey', 'Ukraine', 'United Kingdom', 'Vatican City'] df3 = df[df['Country/Region'].isin(eu)] data = df3[df3['date'] == df3['date'].iloc[-1]].groupby('Country/Region').agg({'Active': 'sum', 'Longitude': 'mean', 'Latitude': 'mean', 'Country/Region': 'first', 'Province/State': 'first'}) data.loc[data['Country/Region'] == 'France', 'Latitude'] = 46.2276 data.loc[data['Country/Region'] == 'France', 'Longitude'] = -3.4360 data.loc[data['Country/Region'] == 'United Kingdom', 'Latitude'] = 55.3781 data.loc[data['Country/Region'] == 'United Kingdom', 'Longitude'] = 3.4360 data.loc[data['Country/Region'] == 'Denmark', 'Latitude'] = 56.2639 data.loc[data['Country/Region'] == 'Denmark', 'Longitude'] = 9.5018 data.loc[data['Country/Region'] == 'Netherlands', 'Latitude'] = 52.1326 data.loc[data['Country/Region'] == 'Netherlands', 'Longitude'] = 5.2913 fig = go.Figure(data=go.Scattergeo( lon = data['Longitude'], lat = data['Latitude'], text = data['Country/Region'] + ', ' + data['Country/Region'] + ': ' + data['Active'].astype(str), mode = 'markers', marker_size = (100 * data['Active'] / data['Active'].max()), marker = dict(reversescale = False, autocolorscale = False, symbol = 'circle', line = dict(width=1, color='rgba(102, 102, 102)'), colorscale = 'Reds', cmin = 0, color = data['Active'], cmax = data['Active'].max(), colorbar_title="Active Cases"))) fig.update_layout(title = 'Number of active cases by European country ', geo=dict(scope='europe', projection_type="natural earth", showland = True, landcolor = "rgb(100, 125, 100)", showocean = True, oceancolor = "rgb(150, 150, 250)", showcountries=True, showsubunits=True, showlakes=False,)) fig.show() from IPython.display import Image Image('./assets/active_cases_eu.png', width=600) ``` ## Focus on the epidemiological trajectories in 
Norway ``` df0 = df[df['Country/Region'] == 'Norway'] df0.head() df0.tail() df1 = df[df['Country/Region'] == 'Norway'].groupby('date')[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum() df1.head() df1.tail() ``` ## Case fatality rate [CFR](https://en.wikipedia.org/wiki/Case_fatality_rate) ``` def fatality_rate_given_country(csse_daily_df, country): dfc = csse_daily_df[csse_daily_df['Country/Region'] == country] last = dfc['date'].iloc[-1] cfr = dfc[dfc['date'] == last]['Deaths'].sum() / dfc[dfc['date'] == last]['Confirmed'].sum() active = dfc[dfc['date'] == last]['Active'].sum() confirmed = dfc[dfc['date'] == last]['Confirmed'].sum() return last, cfr, active, confirmed countrylist = ['Norway', 'Sweden', 'Denmark', 'Iceland', 'China', 'Italy', 'US'] print('Case fatality rate (accumulated Deaths/accumulated Confirmed) for given country:\n') for i, c in enumerate(countrylist): last, cfr, active, confirmed = fatality_rate_given_country(df, c) print('%s (upto %s) = %.2f%% (confirmed=%d, active=%d)' % (c, last, cfr*100, confirmed, active)) last, cfr, active, confirmed = fatality_rate_given_country(df, 'Norway') fig = go.Figure([go.Scatter(x=df[df['Country/Region'] == 'Norway'].groupby('date')['date'].first(), y=df[df['Country/Region'] == 'Norway'].groupby('date')['Active'].sum())]) fig.update_layout( title="NORWAY: Active COVID-19 (CFR=%.2f%%)" % (cfr*100), xaxis_title="Date", yaxis_title="Active infected", font=dict( family="Courier New, monospace", size=16, color="#7f7f7f" ) ) fig.show() from IPython.display import Image Image('./assets/active_cases_cfr_norway.png', width=600) region = 'Norway' fig = go.Figure() fig.add_trace(go.Scatter( x=df[df['Country/Region'] == region].groupby('date')['date'].first(), y=df[df['Country/Region'] == region].groupby('date')['Active'].sum(), name="Active cases")) fig.add_trace(go.Scatter( x=df[df['Country/Region'] == region].groupby('date')['date'].first(), y=df[df['Country/Region'] == region].groupby('date')['Confirmed'].sum(), 
name="Total Confirmed")) fig.add_trace(go.Scatter( x=df[df['Country/Region'] == region].groupby('date')['date'].first(), y=df[df['Country/Region'] == region].groupby('date')['Deaths'].sum(), name="Deaths")) fig.add_trace(go.Scatter( x=df[df['Country/Region'] == region].groupby('date')['date'].first(), y=df[df['Country/Region'] == region].groupby('date')['Recovered'].sum(), name="Recovered")) fig.update_layout(title="COVID-19 infections in {}".format(region), xaxis_title="Date", yaxis_title="Number of Individuals") fig.show() fig = go.Figure() countries = ['China', 'Italy', 'South Korea', 'US', 'Spain', 'France', 'Germany', 'Norway'] for country in countries: fig.add_trace(go.Scatter( x=df[df['Country/Region'] == country].groupby('date')['date'].first(), y=df[df['Country/Region'] == country].groupby('date')['Active'].sum(), name=country, opacity=0.8)) fig.update_layout(title="Active COVID-19 cases", xaxis_title="Date", yaxis_title="Number of Individuals") fig.show() from IPython.display import Image Image('./assets/active_cases_selected_countries.png', width=600) fig = go.Figure() for region in ['China', 'Italy', 'US', 'Spain', 'France', 'Germany', 'South Korea', 'Norway']: fig.add_trace(go.Scatter( x=df[df['Country/Region'] == region].groupby('date')['date'].first(), y=df[df['Country/Region'] == region].groupby('date')['Active'].sum(), name=region, hoverinfo='x+y+z+text+name', stackgroup='one')) fig.update_layout(title="COVID-19 Active Cases Worldwide", xaxis_title="Date", yaxis_title="Number of Individuals") fig.show() ```
github_jupyter
# Script to create vector tiles for direct usage in LPVIS - Input: AMA produced shapefiles with parcels and physical blocks downloaded from https://www.data.gv.at/katalog/dataset/invekos-schlaege-oesterreich/resource/26e5b6c4-6e47-45d3-ac65-728c631fd515 and https://www.data.gv.at/katalog/dataset/invekos-referenzen-oesterreich/resource/e883c5d0-ea79-44be-b789-d8bd6a3389e3, bounding box - Output: zoom 14-16 uncompressed pbf vector tiles used in LPvis in two folders wrapped in .tar.gz These should be manually moved to LPvis local folder and committed to GitHub. ``` from shapely.geometry import Polygon import re, time, progressbar import pandas as pd import geopandas as gpd from pyproj import Proj, transform import subprocess, os # Needs `tippecanoe` installed # Modify input/output/bbox as necessary bbox = [15.4, 48.54, 15.96, 48.92] input_parcels_file = 'data/invekos_schlaege_polygon.shp' input_blocks_file = 'data/invekos_referenzen_polygon.shp' output_parcels_file = 'data/agricultural_parcels.gpkg' output_physical_blocks_file = 'data/physical_blocks.gpkg' parcels_directory = 'agricultural_parcels' blocks_directory = 'physical_blocks' print('Reading data parcels') gdf = gpd.read_file(input_parcels_file) inProj = Proj('epsg:4326') outProj = Proj(gdf.crs['init']) # switched order of x,y on purpose y1_t,x1_t = transform(inProj,outProj,bbox[1],bbox[0]) y2_t,x2_t = transform(inProj,outProj,bbox[3],bbox[2]) print('Performing spatial subset') spatial_subset = gdf.cx[x1_t:x2_t, y1_t:y2_t] print('Converting to mercator for tippecanoe') spatial_subset_m = spatial_subset.to_crs('epsg:4326') # variable substitution / classification group merging when creating vector tiles for eurodatacube/LPVIS def replace_umlauts(s): s = re.sub(r"(\b|[^AEIOU])AE", r'\1Ä', s) s = re.sub(r"(\b|[^AEIOU])OE", r'\1Ö', s) s = re.sub(r"(\b|[^AEIOU])UE", r'\1Ü', s) return s print('Merging with classification groups from xls file') renamed = spatial_subset_m.rename(columns={'SNAR_BEZEI':'CT'}) lut = 
pd.read_excel('data/2_LUT_list_Version2_20190724.xlsx') lut.CT = lut.CT.apply(replace_umlauts) merged = renamed.merge(lut, how='left', on='CT') final_data = merged[['ID', 'Ctnum','FS_KENNUNG','CTnumL4A', 'CT', 'SL_FLAECHE', 'geometry']] try: print('Deleting output gpkg file, to not append.') os.unlink(output_parcels_file) except: pass final_data.to_file(output_parcels_file, driver="GPKG") print('Data successfully written!') print('Creating vector tiles') fio = subprocess.Popen(('fio', 'cat', output_parcels_file), stdout=subprocess.PIPE) tippecanoe = subprocess.check_output(('tippecanoe', '--output-to-directory=%s' % parcels_directory, '--drop-densest-as-needed', '--read-parallel', '--minimum-zoom=14', '--maximum-zoom=16', '-l', 'agricultural_parcels', '-f', '--no-tile-compression'), stdin=fio.stdout) fio.wait() print('Reading data physical') gdf = gpd.read_file(input_blocks_file) print('Performing spatial subset') spatial_subset = gdf.cx[x1_t:x2_t, y1_t:y2_t] print('Converting to mercator for tippecanoe') spatial_subset_m = spatial_subset.to_crs('epsg:4326') try: print('Deleting output gpkg file, to not append.') os.unlink(output_physical_blocks_file) except: pass spatial_subset_m.to_file(output_physical_blocks_file, driver="GPKG") print('Data successfully written!') print('Creating vector tiles') fio = subprocess.Popen(('fio', 'cat', output_physical_blocks_file), stdout=subprocess.PIPE) tippecanoe = subprocess.check_output(('tippecanoe', '--output-to-directory=%s' % blocks_directory, '--drop-densest-as-needed', '--read-parallel', '--minimum-zoom=14', '--maximum-zoom=16', '-l', 'physical_blocks', '-f', '--no-tile-compression'), stdin=fio.stdout) fio.wait() # for LPvis repository, the files are stored as compressed archive for storage reasons for tile_dir in [parcels_directory, blocks_directory]: tar = subprocess.run(['tar', '-czvf', '%s.tar.gz' % tile_dir, tile_dir]) ```
github_jupyter
# Maximum Likelihood and Maximum A Posteriori * We looked at the regularization term as a *penalty* term in the objective function. There is another way to interpret the regularization term as well. Specifically, there is a *Bayesian* interpretation. \begin{eqnarray} \min E^{\ast}(\mathbf{w}) &=& \max -E^{\ast}(\mathbf{w})\\ & =& \max \exp \left\{ -E^{\ast}(\mathbf{w})\right\}\\ &=& \max \exp \left\{ -\frac{1}{2}\sum_{n=1}^N \left( y(x_n, \mathbf{w}) - t_n \right)^2 - \frac{\lambda}{2}\left\| \mathbf{w} \right\|^2_2 \right\}\\ &=& \max \exp \left\{ -\frac{1}{2}\sum_{n=1}^N \left( y(x_n, \mathbf{w}) - t_n \right)^2 \right\}\exp\left\{-\frac{1}{2}\lambda\left\| \mathbf{w} \right\|^2_2\right\}\\ &=& \max \prod_{n=1}^N \exp \left\{ -\frac{1}{2} \left( y(x_n, \mathbf{w}) - t_n \right)^2 \right\}\exp\left\{-\frac{1}{2}\lambda\left\| \mathbf{w} \right\|^2_2\right\} \end{eqnarray} (Each "equality" above means the problems share the same optimizing $\mathbf{w}$, not that the objective values are equal: negating an objective turns a minimizer into a maximizer, and $\exp$ is strictly increasing, so it preserves the location of the maximum.) * So, this is a maximization of the *data likelihood* with a *prior*: $p(\mathbf{X}|\mathbf{w})p(\mathbf{w})$ * *Method of Maximum Likelihood:* * A *data likelihood* is how likely the data is given the parameter set * So, if we want to maximize how likely the data is to have come from the model we fit, we should find the parameters that maximize the likelihood * A common trick for maximizing the likelihood is to maximize the log likelihood. Often makes the math much easier. *Why can we maximize the log likelihood instead of the likelihood and still get the same answer?* Because $\ln$ is a strictly increasing function, applying it does not change where the maximum occurs — the argmax is the same. * Consider: $\max \ln \exp \left\{ -\frac{1}{2}\left(y(x_n, \mathbf{w}) - t_n\right)^2\right\}$ We go back to our original objective. 
* *Method of Maximum A Posteriori (MAP):* * Bayes Rule: $p(Y|X) = \frac{p(X|Y)p(Y)}{p(X)}$ * Consider: $p(\mathbf{w}|\mathscr{D}) = \frac{p(\mathscr{D}|\mathbf{w})p(\mathbf{w})}{p(\mathscr{D})}$, i.e., posterior $\propto$ likelihood $\times$ prior ## The Gaussian Distribution: * Consider a univariate Gaussian distribution: \begin{equation} \mathscr{N}(x|\mu, \sigma^2) = \frac{1}{\sqrt{2\pi \sigma^2}}\exp\left\{ -\frac{1}{2}\frac{(x-\mu)^2}{\sigma^2} \right\} \end{equation} * $\sigma^2$ is the variance OR $\frac{1}{\sigma^2}$ is the *precision* * So, as the precision $\frac{1}{\sigma^2}$ gets big, the variance gets smaller/tighter. As the precision gets small, the variance gets larger/wider. * The Gaussian distribution is also called the *Normal* distribution. * We will often write $N(x|\mu, \sigma^2)$ to refer to a Gaussian with mean $\mu$ and variance $\sigma^2$. * *What is the multi-variate Gaussian distribution?* * What is the expected value of $x$ for the Gaussian distribution? \begin{eqnarray} E[x] &=& \int x p(x) dx \\ &=& \int x \frac{1}{\sqrt{2\pi \sigma^2}}\exp\left\{ -\frac{1}{2}\frac{(x-\mu)^2}{\sigma^2} \right\} dx \end{eqnarray} * *Change of variables:* Let \begin{eqnarray} y &=& \frac{x-\mu}{\sigma} \rightarrow x = \sigma y + \mu\\ dy &=& \frac{1}{\sigma} dx \rightarrow dx = \sigma dy \end{eqnarray} * Plugging this into the expectation: \begin{eqnarray} E[x] &=& \int \left(\sigma y + \mu \right)\frac{1}{\sqrt{2\pi}\sigma} \exp\left\{ - \frac{1}{2} y^2 \right\} \sigma dy \\ &=& \int \frac{\sigma y}{\sqrt{2\pi}} \exp\left\{ - \frac{1}{2} y^2 \right\} dy + \int \frac{\mu}{\sqrt{2\pi}} \exp\left\{ - \frac{1}{2} y^2 \right\} dy \end{eqnarray} * The first term is an odd function: $f(-y) = -f(y)$, so its integral over the whole real line is zero; the second term is $\mu$ times a standard Gaussian density, which integrates to one. So, $E[x] = 0 + \mu = \mu$ ## Maximum Likelihood vs. Maximum A Posteriori (MAP) * Let's look at this in terms of binary variables, e.g., Flipping a coin: $X =1$ is heads, $X=0$ is tails * Let $\mu$ be the probability of heads. 
If we know $\mu$, then: $P(x = 1 |\mu) = \mu$ and $P(x = 0|\mu) = 1-\mu$ \begin{eqnarray} P(x|\mu) = \mu^x(1-\mu)^{1-x} = \left\{\begin{array}{c c}\mu & \text{ if } x=1 \\ 1-\mu & \text{ if } x = 0 \end{array}\right. \end{eqnarray} * This is called the *Bernoulli* distribution. The mean and variance of a Bernoulli distribution is: \begin{equation} E[x] = \mu \end{equation} \begin{equation} E\left[(x-\mu)^2\right] = \mu(1-\mu) \end{equation} * So, suppose we conducted many Bernoulli trials (e.g., coin flips) and we want to estimate $\mu$ ### Method: Maximum Likelihood \begin{eqnarray} p(\mathscr{D}|\mu) &=& \prod_{n=1}^N p(x_n|\mu) \\ &=& \prod_{n=1}^N \mu^{x_n}(1-\mu)^{1-x_n} \end{eqnarray} * Maximize : (*What trick should we use?*) \begin{eqnarray} \mathscr{L} = \sum_{n=1}^N x_n \ln \mu + (1-x_n)\ln(1-\mu) \end{eqnarray} \begin{eqnarray} \frac{\partial \mathscr{L}}{\partial \mu} = 0 &=& \frac{1}{\mu}\sum_{n=1}^N x_n - \frac{1}{1-\mu }\sum_{n=1}^N (1 - x_n)\\ 0 &=& \frac{(1-\mu) \sum_{n=1}^N x_n - \mu \sum_{n=1}^N (1- x_n)}{\mu(1-\mu)}\\ 0 &=& \sum_{n=1}^N x_n - \mu \sum_{n=1}^N x_n - \mu \sum_{n=1}^N 1 + \mu \sum_{n=1}^N x_n\\ 0 &=& \sum_{n=1}^N x_n - \mu N\\ \mu &=& \frac{1}{N}\sum_{n=1}^N x_n = \frac{m}{N} \end{eqnarray} where $m$ is the number of successful trials. * So, if we flip a coin 1 time and get heads, then $\mu = 1$ and probability of getting tails is 0. *Would you believe that? We need a prior!* ### Method: Maximum A Posteriori: * Look at several independent trials. 
Consider N = 3 and m = 2 (N is number of trials, m is number of successes) and look at all ways to get 2 H and 1 T: * H H T $\rightarrow \mu \mu (1-\mu) = \mu^2(1-\mu)$ * H T H $\rightarrow \mu (1-\mu) \mu = \mu^2(1-\mu)$ * T H H $\rightarrow (1-\mu) \mu \mu = \mu^2(1-\mu)$ * $\left(\begin{array}{c} 3 \\ 2 \end{array}\right) \mu^2(1-\mu) \rightarrow \left(\begin{array}{c} N \\ m \end{array}\right) \mu^m(1-\mu)^{N-m} = \frac{N!}{(N-m)!m!}\mu^m(1-\mu)^{N-m} $ * This is the Binomial Distribution, gives the probability of $m$ observations of $x=1$ out of N independent trials * So, what we saw is that we need a prior. We want to incorporate our prior belief. Let us place a prior on $\mu$ \begin{equation} Beta(\mu|a,b) = \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)}\mu^{a-1}(1-\mu)^{b-1} \end{equation} \begin{equation} E[\mu] = \frac{a}{a + b} \end{equation} \begin{equation} Var[\mu] = \frac{ab}{(a+b)^2(a+b+1)} \end{equation} * Note: $\Gamma(x) = \int_0^\infty u^{x-1}e^{-u} du$ and when $x$ is a positive integer, it simplifies to $\Gamma(x) = (x-1)!$ * Calculation of the posterior, Take $N = m + l$ observations: \begin{eqnarray} p(\mu | m, l, a, b) &\propto& Bin(m,l|\mu)Beta(\mu|a,b) \\ &\propto& \mu^m(1-\mu)^l\mu^{a-1}(1-\mu)^{b-1}\\ &=& \mu^{m+a-1}(1-\mu)^{l+b-1} \end{eqnarray} * What does this look like? Beta: $a \leftarrow m+a$, $b \leftarrow l+b$ * So, what's the posterior? \begin{equation} p(\mu | m, l, a, b) = \frac{\Gamma(m+a+l+b)}{\Gamma(m+a)\Gamma(l+b)}\mu^{m+a-1}(1-\mu)^{l+b-1} \end{equation} * *Conjugate Prior Relationship:* When the posterior is the same form as the prior * Now we can maximize the (log of the) posterior: \begin{eqnarray} \max_\mu ((m+a-1) \ln \mu + (l+b-1) \ln (1-\mu)) \end{eqnarray} \begin{eqnarray} \frac{\partial \mathscr{L}}{\partial \mu} = 0&=& \frac{m + a -1}{\mu} - \frac{l + b - 1}{1-\mu}\\ &=& (1-\mu)(m+a-1) - \mu(l+b-1)\\ &=& (m+a-1) - \mu(m+a-1) - \mu(l+b-1)\\ \mu &=& \frac{m+a-1}{m+a+l+b-2} \end{eqnarray} * This is the MAP solution. 
def plotBeta(a=2, b=2):
    '''plotBeta(a=2, b=2): Plot the Beta(a, b) probability density on [0, 1).

    a, b -- positive shape parameters of the Beta distribution.
    (Docstring fixed: it previously advertised defaults a=1, b=1,
    contradicting the actual signature defaults a=2, b=2.)
    '''
    xrange = np.arange(0, 1, 0.001)  # equally spaced evaluation points in [0, 1)
    # Normalizing constant Gamma(a+b) / (Gamma(a)*Gamma(b)) of the Beta pdf.
    normconst = math.gamma(a + b) / (math.gamma(a) * math.gamma(b))
    beta = normconst * xrange ** (a - 1) * (1 - xrange) ** (b - 1)
    fig = plt.figure()
    p1 = plt.plot(xrange, beta, 'g')
    plt.show()
github_jupyter
# Collaborative filtering > Using the fastai library for collaborative filtering. ``` from fastai2.tabular.all import * from fastai2.collab import * # all_slow ``` This tutorial highlights how quickly build a `Learner` and train a model on collaborative filtering tasks. ## Training a model For this tutorial, we will use the [Movielens 100k data dataset](https://grouplens.org/datasets/movielens/100k/). We can download it easily and decompress it with the following function: ``` path = untar_data(URLs.ML_100k) ``` The main table is in `u.data`. Since it's not a proper csv, we have to specify a few things while opening it: the tab delimiter, the columns we want to keep and their names. ``` ratings = pd.read_csv(path/'u.data', delimiter='\t', header=None, usecols=(0,1,2), names=['user','movie','rating']) ratings.head() ``` Movie ids are not ideal to look at things, so we load the correspondence movie id to title that is in the table `u.item`: ``` movies = pd.read_csv(path/'u.item', delimiter='|', encoding='latin-1', usecols=(0,1), names=('movie','title'), header=None) movies.head() ``` Next we merge it to our ratings table: ``` ratings = ratings.merge(movies) ratings.head() ``` We can then build a `DataLoaders` object from this table. By default, it takes the first column for user, the second column for the item (here our movies) and the third column for the ratings. We need to change the value of `item_name` in our case, to use the titles instead of the ids: ``` dls = CollabDataLoaders.from_df(ratings, item_name='title', bs=64) ``` In all applications, when the data has been assembled in a `DataLoaders`, you can have a look at it with the `show_batch` method: ``` dls.show_batch() ``` fastai can create and train a collaborative filtering model by using `collab_learner`: ``` learn = collab_learner(dls, n_factors=50, y_range=(0, 5.5)) ``` It uses a simple dot product model with 50 latent factors. 
# Train the collaborative-filtering model with the 1cycle policy.
learn.fit_one_cycle(5, 5e-3, wd=0.1)

# --- Interpretation: keep the 1000 most-rated movies ---
# BUG FIX: the original called ratings.groupby(title) with the bare name
# `title`, which is undefined in this notebook and raises NameError; the
# column must be selected by its string label 'title'.
g = ratings.groupby('title')['rating'].count()
top_movies = g.sort_values(ascending=False).index.values[:1000]
top_movies[:10]

# One learned bias per movie: a user-independent number that can be read as
# the movie's intrinsic "value".
movie_bias = learn.model.bias(top_movies, is_item=True)
movie_bias.shape

# Compare the learned biases against the plain per-movie average ratings.
mean_ratings = ratings.groupby('title')['rating'].mean()
movie_ratings = [(b, i, mean_ratings.loc[i]) for i, b in zip(top_movies, movie_bias)]

# Movies with the worst bias ...
item0 = lambda o: o[0]
sorted(movie_ratings, key=item0)[:15]

# ... and the ones with the best bias.
sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15]
``` movie_w = learn.model.weight(top_movies, is_item=True) movie_w.shape ``` Let's try a PCA to reduce the dimensions and see if we can see what the model learned: ``` movie_pca = movie_w.pca(3) movie_pca.shape fac0,fac1,fac2 = movie_pca.t() movie_comp = [(f, i) for f,i in zip(fac0, top_movies)] ``` Here are the highest score on the first dimension: ``` sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] ``` And the worst: ``` sorted(movie_comp, key=itemgetter(0))[:10] ``` Same thing for our second dimension: ``` movie_comp = [(f, i) for f,i in zip(fac1, top_movies)] sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] ``` And we can even plot the movies according to their scores on those dimensions: ``` idxs = np.random.choice(len(top_movies), 50, replace=False) idxs = list(range(50)) X = fac0[idxs] Y = fac2[idxs] plt.figure(figsize=(15,15)) plt.scatter(X, Y) for i, x, y in zip(top_movies[idxs], X, Y): plt.text(x,y,i, color=np.random.rand(3)*0.7, fontsize=11) plt.show() ```
github_jupyter
``` import matplotlib.pyplot as plt import numpy as np %matplotlib inline ``` # Summary ## scikit-learn API ``X`` : data, 2d numpy array or scipy sparse matrix of shape (n_samples, n_features) ``y`` : targets, 1d numpy array of shape (n_samples,) <table> <tr style="border:None; font-size:20px; padding:10px;"><th colspan=2>``model.fit(X_train, [y_train])``</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><th>``model.predict(X_test)``</th><th>``model.transform(X_test)``</th></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Classification</td><td>Preprocessing</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Regression</td><td>Dimensionality Reduction</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Clustering</td><td>Feature Extraction</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>&nbsp;</td><td>Feature selection</td></tr> </table> ## Model evaluation and parameter selection ``` from sklearn.datasets import load_digits from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import cross_val_score digits = load_digits() X, y = digits.data / 16., digits.target cross_val_score(LogisticRegression(), X, y, cv=5) from sklearn.grid_search import GridSearchCV from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y) grid = GridSearchCV(LogisticRegression(), param_grid={'C': np.logspace(-3, 2, 6)}) grid.fit(X_train, y_train) grid.score(X_test, y_test) ``` ## Model complexity, overfitting, underfitting ![underfitting and overfitting](overfitting_underfitting_cartoon.svg) ## Pipelines ``` from sklearn.pipeline import make_pipeline from sklearn.feature_selection import SelectKBest pipe = make_pipeline(SelectKBest(k=59), LogisticRegression()) pipe.fit(X_train, y_train) pipe.score(X_test, y_test) ``` ## Scoring metrics ``` cross_val_score(LogisticRegression(C=.01), X, y == 3, cv=5) 
cross_val_score(LogisticRegression(C=.01), X, y == 3, cv=5, scoring="roc_auc") ``` ## Data Wrangling ``` from sklearn.preprocessing import OneHotEncoder X = np.array([[15.9, 1], # from Tokyo [21.5, 2], # from New York [31.3, 0], # from Paris [25.1, 2], # from New York [63.6, 1], # from Tokyo [14.4, 1], # from Tokyo ]) y = np.array([0, 1, 1, 1, 0, 0]) encoder = OneHotEncoder(categorical_features=[1], sparse=False) pipe = make_pipeline(encoder, LogisticRegression()) pipe.fit(X, y) pipe.score(X, y) ``` ## Out-of-core Learning <img src="partial_fit.svg" width="80%">
github_jupyter
# Build and deploy the docker container Ensure this notebook is running above the "container" folder containing the dockerfile. ``` %%sh # The name of our algorithm algorithm_name=sagemaker-word2vec cd container chmod +x decision_trees/train chmod +x decision_trees/serve account=$(aws sts get-caller-identity --query Account --output text) # Get the region defined in the current configuration (default to us-west-1 if none defined) region=$(aws configure get region) region=${region:-us-east-1} fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest" # If the repository doesn't exist in ECR, create it. aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1 if [ $? -ne 0 ] then aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null fi # Get the login command from ECR and execute it directly $(aws ecr get-login --region ${region} --no-include-email) # Build the docker image locally with the image name and then push it to ECR # with the full name. docker build -t ${algorithm_name} . 
docker tag ${algorithm_name} ${fullname} docker push ${fullname} # S3 prefix prefix = 'word2vec' # Define IAM role import boto3 import re import os import numpy as np import pandas as pd from sagemaker import get_execution_role role = get_execution_role() # tar the model file: !tar -czvf model.tar.gz ./word2vec_2.model from sagemaker.session import Session model_data = Session().upload_data(path='model.tar.gz', key_prefix='model') print(model_data) import sagemaker as sage from time import gmtime, strftime sess = sage.Session() # generating the image path account = sess.boto_session.client('sts').get_caller_identity()['Account'] region = sess.boto_session.region_name image = '{}.dkr.ecr.{}.amazonaws.com/sagemaker-word2vec:latest'.format(account, region) # to deploy the model, you need to have the model created based on your model artifacts, # create an endpoint configuration and then create the endpoint based on the two. # first let's create the model client = boto3.client('sagemaker') ModelName='TestCx-BYOA' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) first_model = client.create_model( ModelName=ModelName, PrimaryContainer={ 'Image': image, 'ModelDataUrl': 's3://sagemaker-us-east-1-639634733305/model/model.tar.gz' # note that the model.tar.gz file is a tarball of our word2vec_2.model file }, ExecutionRoleArn=role) # create the endpoint configuration endpoint_config_name = 'TestCx-BYOA-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print(endpoint_config_name) create_endpoint_config_response = client.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType':'ml.m4.xlarge', 'InitialVariantWeight':1, 'InitialInstanceCount':1, 'ModelName':ModelName, 'VariantName':'AllTraffic'}]) print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn']) # create the endpoint endpoint_name = 'TestCx-BYOA-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print(endpoint_name) create_endpoint_response = client.create_endpoint( 
def run_predictions(arr_input, batch_size):
    """Invoke the SageMaker endpoint over `arr_input` in batches and return
    the expm1-transformed float predictions as a flat list.

    NOTE(review): np.array_split(a, n) splits `a` into n roughly equal
    pieces, so `batch_size` here is the *number of batches*, not the size
    of each batch — confirm this is the intended semantics.
    """
    predictions = []
    if type(arr_input) != np.ndarray:
        arr_input = np.array(arr_input)
    for arr in np.array_split(arr_input, batch_size):
        if arr.size > 0:
            print("Shape:{0}".format(arr.shape))
            # BUG FIX: the original assigned to `resule` (typo) and then
            # called .decode() on the never-assigned name `result`,
            # raising NameError at runtime.
            result = predictor.predict(arr)
            result = result.decode("utf-8")
            result = result.split(',')
            # Model predicts log1p-scaled values; invert with expm1.
            predictions += [np.expm1(float(r)) for r in result]
    return predictions
github_jupyter
``` print('Hello world') from IPython.display import Image from IPython.core.display import HTML ``` # Tervetuloa opintojaksolle Johdanto datatieteeseen Tiedot opintojakson suorittamisesta löytyy <a href="https://jodatut.github.io/2020/">GitHub:sta</a>. Luennoijana toimii <a href="https://www.tuni.fi/fi/jukka-huhtamaki">Jukka Huhtamäki</a> ([@jnkka](https://twitter.com/jnkka)). Opetusassistenttina toimii <a href="https://tutcris.tut.fi/portal/fi/persons/erjon-skenderi(133f72d8-69a1-4bfb-b48b-35ba71c3322e).html">Erjon Skenderi</a>. Luentomuistion valmisteli [Arho Suominen](https://www.tuni.fi/fi/ajankohtaista/kun-teknologia-muuttuu-yrityksen-taytyy-loytaa-keinot-sopeutua-muutokseen). ## Odotukset keväälle - mitä ihmettä on Johdanto datatieteeseen! "Data Scientist" voidaan kääntää suomeksi tietojen tutkija, vai voidaanko. Mitä tarkoitetaan datatieteellä ja mitä odotuksia opiskelijoilla on opintojaksolle. ![Image of Data Scientiest profile](https://upload.wikimedia.org/wikipedia/commons/7/7f/Data_scientist_Venn_diagram.png) Datatiede rakentuu neljän laajan kokonaisuuden varaan: - liiketoimintaosaaminen, - ohjelmointi- ja tietokantaosaaminen, - tilastollinen analyysi ja - datalähtöinen viestintä ja visualisointi. Opiskelijoilta toivotaan perusosaamista näiltä aloilta. Opintojaksolla on tavoitteena syventyä näihin aiheisiin datatieteen näkökulmasta sekä esitellä opiskelijoille riittävät tiedot datatiedeosaamiseen kuuluvien taitojen hankkimiseen TTY:n opetustarjonnasta. ## Suorittaminen Suorittamisohjeet löytyvät <a href="https://jodatut.github.io/2020/suorittaminen/">opintojakson kotisivulta</a> ## Harjoitukset ja harjoitustyöt Ohjeet harjoitustyöhön suorittamiseksi löytyvät <a href="https://jodatut.github.io/2020/harjoitustyo/">opintojakson kotisivulta</a> # Mitä on datatiede? ## Määritelmä Tietojen tutkijan tai datatieteilijän (Data Scientist) rooli organisaatiossa on moninainen. 
Työtä on kuvattu monitieteiseksi, yhdistäen ainakin tietoteknistä, matemaattista ja liiketoiminnallista osaamista. [Harvard Business Review:n artikkeli Data Scientist: The Sexiest Job of the 21st Century](https://hbr.org/2012/10/data-scientist-the-sexiest-job-of-the-21st-century) "...he started to see possibilities. He began forming theories, testing hunches, and finding patterns that allowed him to predict whose networks a given profile would land in. He could imagine that new features capitalizing on the heuristics he was developing might provide value to users." Alunperin datatieteestä käytettiin termiä [datalogy](https://dl.acm.org/citation.cfm?id=366510). Mielenkiintoista luettavaa on esimerkiksi Sveindottir ja Frøkjær [teksti](https://link.springer.com/article/10.1007/BF01941128) datalogy-termin kehittäjä Naurin luomasta Kööpenhaminalaisesta tietotekniikan koulutuksen traditiosta. Itselle datatieteessä tuntui, osin virheellisesti, keskeiseltä tilastotieteen osaaminen. Osa on jopa mennyt niin pitkälle että ovat pitäneet tilastotiedettä merkittäviltä osin [samana](http://www2.isye.gatech.edu/~jeffwu/presentations/datascience.pdf) kuin datatiede. On kuitenkin selvää että tämä hyvin kapea käsitys ei kuvaa datatiedettä riittävällä tavalla, vaan tilastotiede on nähtävä yhtenä osana tietojen tutkijan [osaamista](https://arxiv.org/ftp/arxiv/papers/1410/1410.3127.pdf). Datatieteessä keskeistä on kyky muokata laajoja aineistoja sekä hyödyntää "ohjelmointia". Erimerkkinä voidaan pitää muutosta pois tilasto-ohjelmista kuten [R](https://fi.wikipedia.org/wiki/R_(ohjelmointikieli)) kohti ohjelmointikieliä kuten [Python](https://fi.wikipedia.org/wiki/Python_(ohjelmointikieli)). Molemmat ovat käytännössä "ohjelmointikieliä", mutta R fokusoi nimenomaisesti tilastolliseen laskentaa ja grafiikan tuottamiseen. Mikä on siis muutos, joka on tapahtunut kun Python kasvattaa suosiotaan osin R:n kustannuksella. 
## Malleja ja käsitekarttoja aiheeseen CRISP-DM malli kuvaa avoimen standardin prosessikuvauksen datatieteen prosessista ![CRISP-DM](https://upload.wikimedia.org/wikipedia/commons/b/b9/CRISP-DM_Process_Diagram.png) Prosessimallin kautta pystyy myös ymmärtämään mitä edellytetään [hyvältä datieteilijältä](https://www.schoolofdatascience.amsterdam/news/skills-need-become-modern-data-scientist/). Tästä pääsee mukavasti tutustumaan [datatieteen metrokarttaan](http://nirvacana.com/thoughts/2013/07/08/becoming-a-data-scientist/). Täysin relevantti kysymys on mielestäni se, onko realistista että yksi henkilö hallitsee näin laajan kokonaisuuden ja minkälaisia painotuksia oheisen metrokartan sisällä voi tehdä niin että osaaminen on edelleen relevanttia. # Datatieteen edellytykset ## Data Globaalisti käytössämme on ennen näkemätön määrä dataa. Arvioimme että vuoteen 2025 mennessä käytössä on [163 Zetabittiä](https://www.forbes.com/sites/andrewcave/2017/04/13/what-will-we-do-when-the-worlds-data-hits-163-zettabytes-in-2025/) dataa, tai kuvattuna toisella tavalla, luomme joka minuutti [käsittämättömän määrän dataa](https://www.domo.com/learn/data-never-sleeps-5?aid=ogsm072517_1&sf100871281=1). Onko realistista että edes ymmärrämme onko tämä määrä dataa hyödyllistä tai mitä tällä datalla voidaan saada aikaan? Data on ollut keskiössä tekoälyn toisessa aallossa, jossa keskiössä on nimenomaisesti tilastollinen oppiminen. Nykyinen tekoälyyn liittyvä toiminta fokusoituu nimenomaan koneoppimiseen ja erityisesti syviin neuroverkkoihin. Tämä ei ole ihme, sillä viime vuosien merkittävimmät menestystarinat perustuvat juuri näihin teknologioihin. Käytettävissä olevat valtavat datamäärät, hyvät kehitystyökalut ja vuosittain kasvava laskentateho vauhdittavat kehitystä. Dataa on myös julkisesti saatavilla enemmän kuin koskaan ennnen. 
Hyvänä esimerkkinä on vaikka [Kaggle](www.kaggle.com), joka antaa mahdollisuuden ladata itselle mielenkiintoisia aineistoja eri tarkoituksiin ``` import pandas as pd df = pd.read_csv("Mall_Customers.csv") df.tail() df.head() df['Gender'] = df['Genre'] import numpy as np df.pivot_table(df, index=["Gender"], aggfunc=np.mean) ``` ## Laskentateho Laskentatehokasvu on selkeästi yksi merkittävin mekanismi data tieteen kehittymiseen. Kaikkihan tavalla tai toisella liittyy Mooren lakiin, eli kykyymme suorittaa laskutoimituksia. !["Mooren laki"](https://upload.wikimedia.org/wikipedia/commons/9/9d/Moore%27s_Law_Transistor_Count_1971-2016.png) Laskentatehon kasvun lisäksi tekniset ratkaisut skaalata yksittäisen koneen tai klusterin laskentatehoa ovat kehittyneet merkittävästi. Nämä tekevät jopa yksittäisestä koneesta huomattavan tehokkaan työyksikön ``` from IPython.core.display import HTML HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/tQBovBvSDvA?start=1808" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>') ``` ## Analyysiympäristö Erityyppiset laskentaympäristöt voidaan karkeasti jakaa kuuteen. Vaihtoehdot kasvavat henkilökohtaisesta koneesta aina pilviratkaisuihin tai laskentaklustereihin. !["laskentaympäristöt"](https://www.tutorialspoint.com/assets/questions/media/11371/Computing%20Environments.PNG) ``` HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/4paAY2kseCE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>') ``` ## Työkalut Käytettävissä olevien työkalujen määrä on kasvanut huimasti. Aikaisemmin käytössä oli lähinnä tilastolaskentaympäristöt kuten [R](https://www.r-project.org/) joita korvaamaan/lisäämään on nyt tullut Python ympäristöt. 
Tämän sisällä keskeisiä työkaluja ovat esimerkiksi [Pandas](https://pandas.pydata.org/), [Scikit-learn](https://scikit-learn.org/stable/) ja visualisointi työkalut kuten [Holoviews](http://holoviews.org/) !["Scikit-learn map"](https://scikit-learn.org/stable/_static/ml_map.png) ``` HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/k27MJJLJNT4?start=1808" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>') ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Dogs vs Cats Image Classification Without Image Augmentation <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`. ## Specific concepts that will be covered: In the process, we will build practical experience and develop intuition around the following concepts * Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator` class — How can we efficiently work with data on disk to interface with our model? * _Overfitting_ - what is it, how to identify it? 
<hr> **Before you begin** Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits. # Importing packages Let's start by importing required packages: * os — to read files and directory structure * numpy — for some matrix math outside of TensorFlow * matplotlib.pyplot — to plot the graph and display images in our training and validation data ``` import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import matplotlib.pyplot as plt import numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) ``` # Data Loading To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of <a href="https://www.kaggle.com/c/dogs-vs-cats/data" target="_blank">Dogs vs. Cats</a> dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research). In previous Colabs, we've used <a href="https://www.tensorflow.org/datasets" target="_blank">TensorFlow Datasets</a>, which is a very easy and convenient way to use datasets. In this Colab however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator` which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem. ``` _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True) ``` The dataset we have downloaded has the following directory structure. <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>cats_and_dogs_filtered</b> |__ <b>train</b> |______ <b>cats</b>: [cat.0.jpg, cat.1.jpg, cat.2.jpg ...] |______ <b>dogs</b>: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...] 
|__ <b>validation</b> |______ <b>cats</b>: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ...] |______ <b>dogs</b>: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...] </pre> We can list the directories with the following terminal command: ``` zip_dir_base = os.path.dirname(zip_dir) !find $zip_dir_base -type d -print ``` We'll now assign variables with the proper file path for the training and validation sets. ``` base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered') train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') train_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures ``` ### Understanding our data Let's look at how many cats and dogs images we have in our training and validation directory ``` num_cats_tr = len(os.listdir(train_cats_dir)) num_dogs_tr = len(os.listdir(train_dogs_dir)) num_cats_val = len(os.listdir(validation_cats_dir)) num_dogs_val = len(os.listdir(validation_dogs_dir)) total_train = num_cats_tr + num_dogs_tr total_val = num_cats_val + num_dogs_val print('total training cat images:', num_cats_tr) print('total training dog images:', num_dogs_tr) print('total validation cat images:', num_cats_val) print('total validation dog images:', num_dogs_val) print("--") print("Total training images:", total_train) print("Total validation images:", total_val) ``` # Setting Model Parameters For convenience, we'll set up variables that will be used later while pre-processing our dataset and training our network. 
``` BATCH_SIZE = 100 # Number of training examples to process before updating our models variables IMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels ``` # Data Preparation Images must be formatted into appropriately pre-processed floating point tensors before being fed into the network. The steps involved in preparing these images are: 1. Read images from the disk 2. Decode contents of these images and convert it into proper grid format as per their RGB content 3. Convert them into floating point tensors 4. Rescale the tensors from values between 0 and 255 to values between 0 and 1, as neural networks prefer to deal with small input values. Fortunately, all these tasks can be done using the class **tf.keras.preprocessing.image.ImageDataGenerator**. We can set this up in a couple of lines of code. ``` train_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our training data validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data ``` After defining our generators for training and validation images, **flow_from_directory** method will load images from the disk, apply rescaling, and resize them using single line of code. ``` train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150) class_mode='binary') val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, shuffle=False, target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150) class_mode='binary') ``` ### Visualizing Training images We can visualize our training images by getting a batch of images from the training generator, and then plotting a few of them using `matplotlib`. ``` sample_training_images, _ = next(train_data_gen) ``` The `next` function returns a batch from the dataset. One batch is a tuple of (*many images*, *many labels*). 
For right now, we're discarding the labels because we just want to look at the images. ``` # This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column. def plotImages(images_arr): fig, axes = plt.subplots(1, 5, figsize=(20,20)) axes = axes.flatten() for img, ax in zip(images_arr, axes): ax.imshow(img) plt.tight_layout() plt.show() plotImages(sample_training_images[:5]) # Plot images 0-4 ``` # Model Creation ## Define the model The model consists of four convolution blocks with a max pool layer in each of them. Then we have a fully connected layer with 512 units, with a `relu` activation function. The model outputs a raw logit for each of the two classes — dogs and cats; `softmax` probabilities are computed inside the loss function, which is why the final `Dense(2)` layer has no activation. ``` model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(2) ]) ``` ### Compile the model As usual, we will use the `adam` optimizer. Since the model outputs raw logits for a softmax categorization, we'll use `sparse_categorical_crossentropy` with `from_logits=True` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument. ``` model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) ``` ### Model Summary Let's look at all the layers of our network using the **summary** method. ``` model.summary() ``` ### Train the model It's time we train our network. 
Since our batches are coming from a generator (`ImageDataGenerator`), we'll use `fit_generator` instead of `fit`. ``` EPOCHS = 100 history = model.fit_generator( train_data_gen, steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))), epochs=EPOCHS, validation_data=val_data_gen, validation_steps=int(np.ceil(total_val / float(BATCH_SIZE))) ) ``` ### Visualizing results of the training We'll now visualize the results we get after training our network. ``` acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(EPOCHS) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.savefig('./foo.png') plt.show() ``` As we can see from the plots, training accuracy and validation accuracy are off by large margin and our model has achieved only around **70%** accuracy on the validation set (depending on the number of epochs you trained for). This is a clear indication of overfitting. Once the training and validation curves start to diverge, our model has started to memorize the training data and is unable to perform well on the validation data.
github_jupyter
# Generative Adversarial Network In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits! GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out: * [Pix2Pix](https://affinelayer.com/pixsrv/) * [CycleGAN](https://github.com/junyanz/CycleGAN) * [A whole list](https://github.com/wiseodd/generative-models) The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator; it wants to output data that looks _as close as possible_ to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator. ![GAN diagram](assets/gan_diagram.png) The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator. The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow. 
``` %matplotlib inline import pickle as pkl import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data') ``` ## Model Inputs First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input `inputs_real` and the generator input `inputs_z`. We'll assign them the appropriate sizes for each of the networks. ``` def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z ``` ## Generator network ![GAN Network](assets/gan_network.png) Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values. #### Variable Scope Here we need to use `tf.variable_scope` for two reasons. Firstly, we're going to make sure all the variable names start with `generator`. Similarly, we'll prepend `discriminator` to the discriminator variables. This will help out later when we're training the separate networks. We could just use `tf.name_scope` to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also _sample from it_ as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the `reuse` keyword for `tf.variable_scope` to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again. 
To use `tf.variable_scope`, you use a `with` statement: ```python with tf.variable_scope('scope_name', reuse=False): # code here ``` Here's more from [the TensorFlow documentation](https://www.tensorflow.org/programmers_guide/variable_scope#the_problem) to get another look at using `tf.variable_scope`. #### Leaky ReLU TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can take the outputs from a linear fully connected layer and pass them to `tf.maximum`. Typically, a parameter `alpha` sets the magnitude of the output for negative values. So, the output for negative input (`x`) values is `alpha*x`, and the output for positive `x` is `x`: $$ f(x) = \max(\alpha * x, x) $$ #### Tanh Output The generator has been found to perform the best with $\tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1. ``` def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('generator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(z, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) # Logits and tanh output logits = tf.layers.dense(h1, out_dim, activation=None) out = tf.tanh(logits) return out ``` ## Discriminator The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer. 
``` def discriminator(x, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('discriminator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(x, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) logits = tf.layers.dense(h1, 1, activation=None) out = tf.sigmoid(logits) return out, logits ``` ## Hyperparameters ``` # Size of input image to discriminator input_size = 784 # Size of latent vector to generator z_size = 100 # Sizes of hidden layers in generator and discriminator g_hidden_size = 128 d_hidden_size = 128 # Leak factor for leaky ReLU alpha = 0.01 # Smoothing smooth = 0.1 ``` ## Build network Now we're building the network from the functions defined above. First is to get our inputs, `input_real, input_z` from `model_inputs` using the sizes of the input and z. Then, we'll create the generator, `generator(input_z, input_size)`. This builds the generator with the appropriate input and output sizes. Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as `g_model`. So the real data discriminator is `discriminator(input_real)` while the fake discriminator is `discriminator(g_model, reuse=True)`. ``` tf.reset_default_graph() # Create our input placeholders input_real, input_z = model_inputs(input_size, z_size) # Build the model g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha) # g_model is the generator output d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha) ``` ## Discriminator and Generator Losses Now we need to calculate the losses, which is a little tricky. 
For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_loss_real + d_loss_fake`. The losses will be sigmoid cross-entropies, which we can get with `tf.nn.sigmoid_cross_entropy_with_logits`. We'll also wrap that in `tf.reduce_mean` to get the mean for all the images in the batch. So the losses will look something like ```python tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) ``` For the real image logits, we'll use `d_logits_real` which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter `smooth`. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like `labels = tf.ones_like(tensor) * (1 - smooth)` The discriminator loss for the fake data is similar. The logits are `d_logits_fake`, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that. Finally, the generator losses are using `d_logits_fake`, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images. 
``` # Calculate losses d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake))) d_loss = d_loss_real + d_loss_fake g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake))) ``` ## Optimizers We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use `tf.trainable_variables()`. This creates a list of all the variables we've defined in our graph. For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with `generator`. So, we just need to iterate through the list from `tf.trainable_variables()` and keep variables that start with `generator`. Each variable object has an attribute `name` which holds the name of the variable as a string (`var.name == 'weights_0'` for instance). We can do something similar with the discriminator. All the variables in the discriminator start with `discriminator`. Then, in the optimizer we pass the variable lists to `var_list` in the `minimize` method. This tells the optimizer to only update the listed variables. Something like `tf.train.AdamOptimizer().minimize(loss, var_list=var_list)` will only train the variables in `var_list`. 
``` # Optimizers learning_rate = 0.002 # Get the trainable_variables, split into G and D parts t_vars = tf.trainable_variables() g_vars = [var for var in t_vars if var.name.startswith('generator')] d_vars = [var for var in t_vars if var.name.startswith('discriminator')] d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars) ``` ## Training ``` batch_size = 100 epochs = 100 samples = [] losses = [] # Only save generator variables saver = tf.train.Saver(var_list=g_vars) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images, reshape and rescale to pass to D batch_images = batch[0].reshape((batch_size, 784)) batch_images = batch_images*2 - 1 # Sample random noise for G batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size)) # Run optimizers _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z}) _ = sess.run(g_train_opt, feed_dict={input_z: batch_z}) # At the end of each epoch, get the losses and print them out train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images}) train_loss_g = g_loss.eval({input_z: batch_z}) print("Epoch {}/{}...".format(e+1, epochs), "Discriminator Loss: {:.4f}...".format(train_loss_d), "Generator Loss: {:.4f}".format(train_loss_g)) # Save losses to view after training losses.append((train_loss_d, train_loss_g)) # Sample from generator as we're training for viewing afterwards sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha), feed_dict={input_z: sample_z}) samples.append(gen_samples) saver.save(sess, './checkpoints/generator.ckpt') # Save training generator samples with open('train_samples.pkl', 'wb') as f: pkl.dump(samples, 
f) ``` ## Training loss Here we'll check out the training losses for the generator and discriminator. ``` fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator') plt.plot(losses.T[1], label='Generator') plt.title("Training Losses") plt.legend() ``` ## Generator samples from training Here we can view samples of images from the generator. First we'll look at images taken while training. ``` def view_samples(epoch, samples): fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True) for ax, img in zip(axes.flatten(), samples[epoch]): ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) im = ax.imshow(img.reshape((28,28)), cmap='Greys_r') return fig, axes # Load samples from generator taken while training with open('train_samples.pkl', 'rb') as f: samples = pkl.load(f) ``` These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make. ``` _ = view_samples(-1, samples) ``` Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion! ``` rows, cols = 10, 6 fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True) for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes): for img, ax in zip(sample[::int(len(sample)/cols)], ax_row): ax.imshow(img.reshape((28,28)), cmap='Greys_r') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) ``` It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures appear out of the noise like 1s and 9s. ## Sampling from the generator We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples! 
``` saver = tf.train.Saver(var_list=g_vars) with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha), feed_dict={input_z: sample_z}) _ = view_samples(0, [gen_samples]) ```
github_jupyter
# Creates transformation files This notebook creates the transformation files from raw MRI space to normalized SPM space # Imports ``` import sys import os ``` The following line permits to import deep_folding even if this notebook is executed from the notebooks subfolder (and no install has been launched): /notebooks/use_transform.ipynb /deep_folding/__init__.py ``` sys.path.append((os.path.abspath('../'))) import deep_folding print((os.path.dirname(deep_folding.__file__))) ``` # User-specific variables We now assign path names and other user-specific variables. The source directory is where the database lies. It contains the morphologist analysis subfolder ANALYSIS/3T_morphologist ``` src_dir = os.path.join(os.getcwd(), '../data/source/unsupervised') src_dir = os.path.abspath(src_dir) print(("src_dir = " + src_dir)) ``` The target directory tgt_dir is where the files will be saved ``` tgt_dir = os.path.join(os.getcwd(), '../data/target/transform') tgt_dir = os.path.abspath(tgt_dir) print(("tgt_dir = " + tgt_dir)) ref_dir = os.path.join(os.getcwd(), '../data/reference/transform') ref_dir = os.path.abspath(ref_dir) print(("ref_dir = " + ref_dir)) print((sys.argv)) ``` # Illustration of main program uses We will first use the program with no effect by using number of subjects set to 0, or by calling the help function ### Using external calls ``` !python ../deep_folding/anatomist_tools/transform.py -n 0 !python ../deep_folding/anatomist_tools/transform.py --help ``` ### By using the main function call ``` from deep_folding.anatomist_tools import transform print((transform.__file__)) args = "-n 0" argv = args.split(' ') transform.main(argv) args = "--help" argv = args.split(' ') transform.main(argv) tgt_dir ``` ### By using the API function call ``` transform.transform_to_spm(src_dir=src_dir, tgt_dir=tgt_dir, number_subjects=0) ``` # Test example ``` transform.transform_to_spm(src_dir=src_dir, tgt_dir=tgt_dir, number_subjects=1) from soma import aims graph_file = 
f'{src_dir}/ANALYSIS/3T_morphologist/100206/t1mri/default_acquisition/default_analysis/folds/3.1/default_session_auto/R100206_default_session_auto.arg' graph = aims.read(graph_file) g_to_icbm = aims.GraphManip.getICBMTransform(graph) g_to_icbm g_to_icbm_template = aims.GraphManip.getICBM2009cTemplateTransform(graph) g_to_icbm_template ``` # Result analysis Prints the list of files of the target directory ``` print(('\n'.join(os.listdir(tgt_dir)))) ``` Expected output (we read the transformation file from the reference directory): ``` ref_file = os.listdir(ref_dir)[0] print("ref_file = ", ref_file, '\n') with open(os.path.join(ref_dir,ref_file), 'r') as f: print((f.read())) ``` Obtained output (we read the transformation file from the target directory): ``` tgt_file = os.listdir(tgt_dir)[0] print("tgt_file = ", tgt_file, '\n') with open(os.path.join(tgt_dir,tgt_file), 'r') as f: print((f.read())) ``` Generated README (we read the generated README from the target directory) ``` with open(os.path.join(tgt_dir,"transform.json"), 'r') as f: print((f.read())) print(type(f.read())) from soma import aims t = aims.read(os.path.join(tgt_dir,tgt_file)) t r = aims.read(os.path.join(ref_dir,ref_file)) r t == r ``` # Test of automatic tranform extraction from graph ``` graph_file = f'{src_dir}/ANALYSIS/3T_morphologist/100206/t1mri/default_acquisition/default_analysis/folds/3.1/default_session_auto/R100206_default_session_auto.arg' graph = aims.read(graph_file) g_to_icbm = aims.GraphManip.getICBMTransform(graph) g_to_icbm g_to_icbm_template = aims.GraphManip.getICBM2009cTemplateTransform(graph) g_to_icbm_template g_to_icbm_template == r r ```
github_jupyter
# Lists and Tuples ## Lists Recap A list is a sequence of values. These values can be anything: strings, numbers, booleans, even other lists. To make a list you put the items separated by commas between brackets [] ``` sushi_order = ['unagi', 'hamachi', 'otoro'] prices = [6.50, 5.50, 15.75] print(sushi_order) print(prices) ``` You can access a single element in a list by indexing in using brackets. List indexing starts at 0 so to get the first element, you use 0, the second element is 1 and so on. list[index] ``` print(sushi_order[0]) print(sushi_order[2]) ``` You can find the length of a list using len len(list) ``` print(len(sushi_order)) ``` You can use negative indexing to get the last element of a list ``` print(sushi_order[-1]) ``` ## Nested lists Lists can contain other lists as elements. This is a convenient alternative to a matrix. You can arrange lists of varying lengths (and contents) in a specific order and you can iterate over the elements (see below). ``` everyones_order = [['california roll'], ['unagi', 'dragon roll'], sushi_order] print(everyones_order) ``` To access an element in a nested list, first index to the inner list, then index to the item. Example: ``` list_of_lists = [[1,2], [3,4], []] ``` Access the inner list by its index, then index to the item ```python inner_list = list_of_lists[1] # [3,4] print(inner_list[0]) # 3 ``` Or even quicker: ```python list_of_lists[1][0] # 3 ``` To get dragon roll from the sushi order, first we get the second element (index 1) then we get the second item (index 1) ``` everyones_order[1][1] ``` ### TRY IT Get the california roll from the list `everyones_order`. As a challenge print all the items from the second person's order. ## Mutable Lists Lists are mutable, that means that you can change elements. 
To assign a new value to an element my_list = [1, 2, 3] my_list[0] = 100 ``` sushi_order[0] = 'caterpillar roll' print(sushi_order) ``` ### TRY IT Update the last element in `prices` to be 21.00 and print out the new result ## Operators and Lists The `in` operator allows you to see if an element is contained in a list ``` sushi_order print('hamachi' in sushi_order) if 'otoro' in sushi_order: print("Big spender!") ``` You can use some arithmatic operators on lists The `+` operator concatenates two lists The `*` operator duplicates a list that many times ``` print(sushi_order * 3) print(prices + sushi_order) ``` Note: You can only concatenate lists with lists! If you want to add a "non-list" element you can use the append() function that we will learn about in the next section. ``` # WRONG prices + 22 ``` Remember slices from strings? We can also use the slice operator on lists ``` inexpensive = sushi_order[:2] #takes only the first two elements from list print(inexpensive) ``` Don't forget, you can use the `for` and `in` keywords to loop through a list ``` for item in sushi_order: print("I'd like to order the {}.".format(item)) print("And hold the wasabi!") for idx, item in enumerate(sushi_order): print("I'd like to order the {0} for {1}.".format(item, prices[idx])) ``` ### TRY IT Create a variable called `lots_of_sushi` that repeats the inexpensive list two times ## Adding and deleting elements To add an element to a list, you have a few options 1. the append method adds an element or elements to the end of a list, if you pass it a list, the next element with be a list (making a list of lists) 2. the extend method takes a list of elements and adds them all to the end, not creating a list of lists 3. 
use the `+` operator like you saw before ``` my_sushis = ['maguro', 'rock n roll'] my_sushis.append('avocado roll') print(my_sushis) my_sushis.append(['hamachi', 'california roll']) print(my_sushis) my_sushis = ['maguro', 'rock n roll'] my_sushis.extend(['hamachi', 'california roll']) print(my_sushis) ``` You also have several options for removing elements 1. the pop method takes the index of the element to remove and returns the value of the element 2. the remove method takes the value of the element to remove 3. the del operator deletes the element or slice of the list that you give it del l[1:] ``` print(my_sushis) last_sushi = my_sushis.pop(-1) print(last_sushi) my_sushis.remove('maguro') print(my_sushis) del my_sushis[1:] print(my_sushis) ``` ### TRY IT Add 'rock n roll' to `sushi_order` then delete the first element of `sushi_order` ## List Functions `max` will return maximum value of list `min` returns minimum value of list `sum` returns the sum of the values in a list `len` returns the number of elements in a list # Just a reminder ``` numbers = [1, 1, 2, 3, 5, 8] print(max(numbers)) print(min(numbers)) print(sum(numbers)) print(len(numbers)) ``` ### TRY IT Find the average of `numbers` using list functions (and not a loop!) (And if you are feeling self-loathing, look back at lesson 4 and see how many lines of code it took to do this without aggregation functions) ## Aliasing If you assign a list to another variable, it will still refer to the same list. This can cause trouble if you change one list because the other will change too. ``` cooked_rolls = ['unagi roll', 'shrimp tempura roll'] my_order = cooked_rolls my_order.append('hamachi') print(my_order) print(cooked_rolls) ``` To check this, you can use the `is` operator to see if both variable refer to the same object ``` print(my_order is cooked_rolls) ``` To fix this, you can make a copy of the list using the list function `list` takes a sequence and turns it into a list. 
Alternatively you can use the `copy()` method: `my_order = cooked_rolls.copy()` ``` cooked_rolls = ['unagi roll', 'shrimp tempura roll'] my_order = list(cooked_rolls) my_order.append('hamachi') print(my_order) print(cooked_rolls) ``` ## Tuples Tuples are very similar to lists. The major difference is that tuples are immutable meaning that you can not add, remove, or assign new values to a tuple. The creator of a tuple is the comma `,` but by convention people usually surround tuples with parenthesis. ``` noodles = ('soba', 'udon', 'ramen', 'lo mein', 'somen', 'rice noodle') print(type(noodles)) ``` You can create a tuple from any sequence using the `tuple` function ``` sushi_tuple = tuple(my_order) print(sushi_tuple) # Remember strings are sequences maguro = tuple('maguro') print(maguro) ``` To create a single element tuple, you need to add a comma to the end of that element (it looks kinda weird) ``` single_element_tuple = (1,) print(single_element_tuple) print(type(single_element_tuple)) ``` You can use the indexing and slicing you learned for lists the same with tuples. 
But, because tuples are immutable, you cannot use the append, pop, del, extend, or remove methods or even assign new values to indexes ``` print(noodles[0]) print(noodles[4:]) # This should throw an error noodles[0] = 'spaghetti' ``` To change the values in a tuple, you need to create a new tuple (there is nothing stopping you from assigning it to the same variable, though ``` print(sushi_tuple) sushi_tuple = sushi_tuple[1:] + ('california roll',) print(sushi_tuple) ``` You can loop through tuples the same way you loop through lists, using `for` `in` ``` for noodle in noodles: print("Yummy, yummy {0}".format(noodle)) ``` ### TRY IT Create a tuple containing 'soy sauce' 'ginger' and 'wasabi' and save it in a variable called `accompaniments` ## Zip the zip function takes any number of lists of the same length and returns a generator for lists of tuples where the tuples will contain the i-th element from each of the lists. This is really useful when combining lists that are related (especially for looping) ** remider ** to print a generator, just cast the results as a list (you can cast them as a tuple instead, actually) ``` print(list(zip([1,2,3], [4,5,6]))) sushi = ['salmon', 'tuna', 'sea urchin'] prices = [5.5, 6.75, 8] sushi_and_prices = list(zip(sushi, prices)) sushi_and_prices for sushi, price in sushi_and_prices: print("The {0} costs ${1}".format(sushi, price)) ``` ## Enumerate While the zip function iterates over two lists, the built-in function enumerate loops through indices and elements of a list. It returns a list of tuples containing the index and value of that element. for index, value in enumerate(list): ... ``` exotic_sushi = ['tako', 'toro', 'uni', 'hirame'] for index, item in enumerate(exotic_sushi): print(index, item) ``` # Project: Party Budget You are tasked with writing budgeting software, but at this point, things are a mess. You have two files. `budget_prices.txt` has a list of costs for each item separated by new lines (\n). 
`budget_items.txt` has a list of the items that were bought. Luckily they are both in order. You need to write a program that will take the files and a value for the overall budget and print out the total spent and how close they are to reaching the budget. In step 2 you will create a new file where the items and prices are in the same document and there is a sum printed out at the end. ## Step 1 1. Create a function called `file_to_float_list` that takes in a file and returns a list containing a float for each line **Hint** Make sure to remove the newlines when casting as floats. 2. Store the budget of 2000.00 in a variable called `budget`. 3. Run file_to_float_list on budget_prices.txt and save the result in a variable called `prices`. 4. Calculate the `sum` of the prices array and store in a variable called `spent`. 5. Calculate the percentage of budget spent and store in a variable called `percent_spent` 6. Print out the results: Budget: 2000.00 Spent: (amt spent) Percentage Spent: (percent spent) 7. ** Bonus ** Print out a progress bar for the budget. Print out '=' for every 10% spent and '-' for every 10% unspent. =====>----- ## Step 2 1. Create a function called `file_to_string_list` that takes in a file and returns a list containing a string for each line with newlines removed. 2. Run `file_to_string_list` on budget_items.txt and save the result in a variable called `stuff_bought`. 3. Zip `stuff_bought` and `prices` together and store in a variable called `items_and_prices` 4. Loop through `items_and_prices` and print out the item, then a tab character '\t' and then the price (use string formatting) 5. Print another line 'Sum\t(amount spent)' 6. Print a final line 'Budget\t(budget)' 7. **Bonus** Print everything you printed for step 2 into a new file. (Then open the file in excel.)
github_jupyter
# Tokenizing text ``` from nb_200 import * ``` ## Preprocessing the dataset ``` path = untar_data(URLs.IMDB) # export from multiprocessing import Process, Queue import spacy,html from spacy.symbols import ORTH from fastprogress import progress_bar,master_bar import pickle,random ``` Before even tokenizing, we will apply a bit of preprocessing on the texts to clean them up (we saw the one up there had some HTML code). These rules are applied before we split the sentences in tokens. ``` #export #special tokens UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split() def sub_br(t): "Replaces the <br /> by \n" re_br = re.compile(r'<\s*br\s*/?>', re.IGNORECASE) return re_br.sub("\n", t) def spec_add_spaces(t): "Add spaces around / and #" return re.sub(r'([/#])', r' \1 ', t) def rm_useless_spaces(t): "Remove multiple spaces" return re.sub(' {2,}', ' ', t) def replace_rep(t): "Replace repetitions at the character level: cccc -> TK_REP 4 c" def _replace_rep(m:Collection[str]) -> str: c,cc = m.groups() return f' {TK_REP} {len(cc)+1} {c} ' re_rep = re.compile(r'(\S)(\1{3,})') return re_rep.sub(_replace_rep, t) def replace_wrep(t): "Replace word repetitions: word word word -> TK_WREP 3 word" def _replace_wrep(m:Collection[str]) -> str: c,cc = m.groups() return f' {TK_WREP} {len(cc.split())+1} {c} ' re_wrep = re.compile(r'(\b\w+\W+)(\1{3,})') return re_wrep.sub(_replace_wrep, t) def fixup_text(x): "Various messy things we've seen in documents" re1 = re.compile(r' +') x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace( 'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace( '<br />', "\n").replace('\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace( ' @-@ ','-').replace('\\', ' \\ ') return re1.sub(' ', html.unescape(x)) default_pre_rules = [fixup_text, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces, sub_br] default_spec_tok = [UNK, 
PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ] replace_rep('cccc') replace_wrep('word word word word word ') ``` These rules are applies after the tokenization on the list of tokens. ``` #export def replace_all_caps(x): "Replace tokens in ALL CAPS by their lower version and add `TK_UP` before." res = [] for t in x: if t.isupper() and len(t) > 1: res.append(TK_UP); res.append(t.lower()) else: res.append(t) return res def deal_caps(x): "Replace all Capitalized tokens in by their lower version and add `TK_MAJ` before." res = [] for t in x: if t == '': continue if t[0].isupper() and len(t) > 1 and t[1:].islower(): res.append(TK_MAJ) res.append(t.lower()) return res def add_eos_bos(x): return [BOS] + x + [EOS] default_post_rules = [deal_caps, replace_all_caps, add_eos_bos] replace_all_caps(['I', 'AM', 'SHOUTING']) deal_caps(['My', 'name', 'is', 'Jeremy']) ``` A Tokenizer should implement two methods: init with a certain language and some special tokens, then `tokenize_pipe` which returns a generator that yields the tokenized texts (should take a generator). `chunksize` is used for some tokenizers like spacy that can treat items as batches. 
``` class BaseTokenizer(): def __init__(self, lang, special_toks): pass def pipe(self, items): for t in items: yield t.split(' ') class SpacyTokenizer(): def __init__(self, lang='en', special_toks=None, batch_size=5000): special_toks = ifnone(special_toks, default_spec_tok) self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"]) for w in default_spec_tok: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}]) self.batch_size=batch_size def pipe(self, items): for doc in self.nlp.pipe(items, batch_size=self.batch_size): yield [d.text for d in doc] def apply_rules(items, rules): for o in items: yield apply_all(o, rules) def tokenize1(text, tok_func=SpacyTokenizer, pre_rules=None, post_rules=None, **tok_kwargs): pre_rules = listify(ifnone(pre_rules, default_pre_rules.copy())) post_rules = listify(ifnone(post_rules, default_post_rules.copy())) tokenizer = tok_func(**tok_kwargs) for tok in tokenizer.pipe(apply_rules([text], pre_rules)): tok = apply_all(tok, post_rules) return tok ``` Returns a generator from `items` after applying `rules` to them. A basic function that reads the content of file. ``` def read_text(fname): with open(fname, 'r') as f: return f.read() ``` The main function that will be called during tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs`, then iterate through the `items`, apply them `pre_rules`, tokenize them, apply them `post_rules`, then apply `output_func` to the original item and the tokens and put the result in `output_queue`. If a `data_queue` is passed, we count the different tokens and return the Counter in it at the end. 
``` def tok_items(items, tok_func, pre_rules, post_rules, output_func, output_queue, data_queue=None, **tok_kwargs): tokenizer = tok_func(**tok_kwargs) if data_queue: counts = Counter() for i,tok in enumerate(tokenizer.pipe(apply_rules(items, pre_rules))): tok = apply_all(tok, post_rules) output_queue.put(output_func(items[i], tok)) if data_queue: counts.update(Counter(tok)) if data_queue: data_queue.put(counts) ``` Helper function to create the same directory structure as in a given folder. ``` def create_folders(path, output_dir, include=None): output_dir = Path(output_dir) os.makedirs(output_dir, exist_ok=True) for i,(p,d,f) in enumerate(os.walk(path)): # returns (dirpath, dirnames, filenames) if include is not None and i==0: d[:] = [o for o in d if o in include] else: d[:] = [o for o in d if not o.startswith('.')] for x in d: os.makedirs(output_dir/(Path(p)/Path(x)).relative_to(path), exist_ok=True) ``` Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`). 
``` SEP = '▁' fname = path/'labels.csv' fname.suffix def tok_folder(path, extensions=['.txt'], include=None, output_dir=None, n_workers=4, pre_rules=None, post_rules=None, tok_func=SpacyTokenizer, **tok_kwargs): path = Path(path) fnames = get_files(path, extensions=extensions, recurse=True, include=include) output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok')) create_folders(path, output_dir, include=include) pre_rules = [read_text] + listify(ifnone(pre_rules, default_pre_rules.copy())) post_rules = listify(ifnone(post_rules, default_post_rules.copy())) output_queue,data_queue = Queue(maxsize=n_workers),Queue(maxsize=n_workers) def _output(o, tok): out = output_dir/o.relative_to(path) with open(out, 'w') as f: f.write(SEP.join(tok)) with open(out.parent/f'{out.stem}.len', 'w') as f: f.write(str(len(tok))) return 1 processes = [Process(target=tok_items, args=(batch, tok_func, pre_rules, post_rules, _output, output_queue), kwargs={'data_queue': data_queue, **tok_kwargs}) for i,batch in enumerate(np.array_split(fnames, n_workers))] for p in processes: p.start() counter = Counter() for _ in progress_bar(fnames, leave=False): _ = output_queue.get() for _ in processes: counter.update(data_queue.get()) for p in processes: p.join() pickle.dump(counter, open(output_dir/'counter.pkl','wb')) path = untar_data(URLs.IMDB) # test fnames = get_files(path, extensions=['.txt'], recurse=True, include=['train', 'test', 'unsup']) tok_path = path.parent/'imdb_tok' assert tok_path.exists() #Take one file randomly idx = random.randint(0, len(fnames)-1) #Check we have the corresponding tokenized version... tok_fname = tok_path/(fnames[idx].relative_to(path)) assert tok_fname.exists() text = read_text(fnames[idx]) tok = tokenize1(text) assert SEP.join(tok) == read_text(tok_fname) len_fname = tok_fname.parent/f'{tok_fname.stem}.len' assert len(tok) == int(read_text(len_fname)) ``` When text is in a dataframe, we need to merge the text columns, and maybe mark_fields. 
``` def join_texts(idx, df, mark_fields=False): return ' '.join([(f'{FLD} {i} ' if mark_fields else '') + t for i,t in enumerate(df.iloc[int(idx)].values)]) ``` Preprocessing function for texts in a dataframe. Tokenized texts will be put in a similar dataframe with just one column of texts and the other columns the same. ``` def tok_df(df, text_cols, n_workers=4, pre_rules=None, post_rules=None, mark_fields=None, tok_func=SpacyTokenizer, **tok_kwargs): text_cols = listify(text_cols) mark_fields = ifnone(mark_fields, len(listify(text_cols)) > 1) pre_rules = listify(ifnone(pre_rules, default_pre_rules.copy())) pre_rules = [partial(join_texts, df=df[text_cols], mark_fields=mark_fields)] + pre_rules post_rules = listify(ifnone(post_rules, default_post_rules.copy())) output_queue,data_queue = Queue(maxsize=n_workers),Queue(maxsize=n_workers) def _output(o, tok): return (o,tok) processes = [Process(target=tok_items, args=(batch, tok_func, pre_rules, post_rules, _output, output_queue), kwargs={'data_queue': data_queue, **tok_kwargs}) for i,batch in enumerate(np.array_split(range(len(df)), n_workers))] for p in processes: p.start() lengths,outputs,counter = np.zeros(len(df)),np.zeros(len(df), dtype=np.object),Counter() for _ in progress_bar(range(len(df)), leave=False): i,tok = output_queue.get() lengths[i],outputs[i] = len(tok),SEP.join(tok) for _ in processes: counter.update(data_queue.get()) for p in processes: p.join() other_cols = [c for c in df.columns if c not in text_cols] res = df[other_cols].copy() res['text'],res['text_lengths'] = outputs,lengths return res, counter # test path = untar_data(URLs.IMDB_SAMPLE) df = pd.read_csv(path/'texts.csv') out,cnt = tok_df(df, text_cols='text') test_eq(set(out.columns),set(list(df.columns)+['text_lengths'])) idx = random.randint(0, len(df)-1) text = df['text'][idx] tok = tokenize1(text) test_eq(SEP.join(tok), out['text'][idx]) test_eq(len(tok), out['text_lengths'][idx]) #With two fields, mark fields become true by default 
df['text1'] = df['text'] out,cnt = tok_df(df, text_cols=['text', 'text1']) idx = random.randint(0, len(df)-1) text = f"{FLD} 0 {df['text'][idx]} {FLD} 1 {df['text1'][idx]}" tok = tokenize1(text) test_eq(SEP.join(tok), out['text'][idx]) test_eq(len(tok), out['text_lengths'][idx]) def tok_csv(fname, text_cols, outname=None, n_workers=4, pre_rules=None, post_rules=None, mark_fields=None, tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs): df = pd.read_csv(fname, header=header, chunksize=chunksize) outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv')) kwargs = dict(n_workers=n_workers, pre_rules=pre_rules, post_rules=post_rules, mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs) if chunksize is None: out,cnt = tok_df(df, text_cols, **kwargs) out.to_csv(outname, header=header, index=False) else: cnt = Counter() for i,dfp in enumerate(df): out,c = tok_df(dfp, text_cols, **kwargs) out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a') cnt.update(c) pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb')) #test path = untar_data(URLs.IMDB_SAMPLE) tok_csv(path/'texts.csv', 'text') assert (path/'texts_tok.csv').exists() df = pd.read_csv(path/'texts.csv') df_tok = pd.read_csv(path/'texts_tok.csv') idx = random.randint(0, len(df)-1) text = df['text'][idx] tok = tokenize1(text) test_eq(SEP.join(tok), df_tok['text'][idx]) test_eq(len(tok), df_tok['text_lengths'][idx]) #test path = untar_data(URLs.IMDB_SAMPLE) tok_csv(path/'texts.csv', 'text', chunksize=500) assert (path/'texts_tok.csv').exists() df = pd.read_csv(path/'texts.csv') df_tok = pd.read_csv(path/'texts_tok.csv') test_eq(len(df_tok), len(df)) idx = random.randint(0, len(df)-1) text = df['text'][idx] tok = tokenize1(text) test_eq(SEP.join(tok), df_tok['text'][idx]) test_eq(len(tok), df_tok['text_lengths'][idx]) ``` ## Getting in a DataBunch ### Text data blocks ``` import collections class ReadTokens(Transform): def __call__(self, o): text 
= read_text(o) if isinstance(o, Path) else str(o) return text.split(SEP) def decode(self, o): return SEP.join(o) def show(self, x, ax): print(x) class Numericalize(MultiCategorize): _order = 5 def __init__(self, vocab): self.vocab = vocab self.o2i = collections.defaultdict(int, {w:i for i,w in enumerate(vocab)}) class Text(Item): tfm = [ReadTokens, Numericalize] def text_getter(suf='', **kwargs): def _inner(o, **kwargs): return get_files(o/suf, extensions=['.txt'], recurse=True) return _inner class ImdbData(DataBlock): types = (Text,Item) get_items = text_getter() split = random_splitter() label_func = lambda fn,self: int(read_text(fn.parent/f'{fn.stem}.len')) path = untar_data(URLs.IMDB) path_tok = path.parent/'imdb_tok' counter = pickle.load(open(path_tok/'counter.pkl', 'rb')) vocab = [w for w,i in counter.most_common(60000) if i >= 2] dblk = ImdbData(path_tok, tfms_x=[ReadTokens(), Numericalize(vocab)]) dsrc = dblk.datasource() x,y = dsrc.get(0,0) t = dsrc.decode((x,y)) t ``` ### Batching ``` class LM_PreLoader(): def __init__(self, fl, lengths=None, bs=64, bptt=70, shuffle=False): self.fl,self.bs,self.bptt,self.shuffle = fl,bs,bptt,shuffle self.lengths = [len(o[0]) for o in fl] if lengths is None else lengths self.n_batch = sum(self.lengths) // bs self.batchify() def __len__(self): return ((self.n_batch-1) // self.bptt) * self.bs def __getitem__(self, i): k = (i % self.bs) * self.n_batch + (i // self.bs) * self.bptt item_idx = (self.cumlen > k).nonzero().min().item() offset = k if item_idx==0 else k-self.cumlen[item_idx-1] text = self.fl[item_idx][0][offset:] while len(text) <= self.bptt: item_idx += 1 text += self.fl[item_idx][0] return tensor(text[:self.bptt]),tensor(text[1:self.bptt+1]) def batchify(self): self.idxs = torch.randperm(len(fl)) if self.shuffle else tensor(range(len(self.fl))) self.cumlen = (tensor(self.lengths)[idxs] if self.shuffle else tensor(self.lengths)).cumsum(0) #test ds = LM_PreLoader(dsrc[0], lengths=lengths) x,y = ds[0] 
test_equal(x[1:], y[:-1]) x0,x1 = dsrc.get(0,0)[0],dsrc.get(1,0)[0] test_equal(x, tensor(x0+x1)[:70]) test_equal(ds[64][0], tensor(x0+x1)[70:140]) k = ds.n_batch x,y = ds[1] offset = k - ds.cumlen[1262] test_equal(x, tensor(dsrc.get(1263,0)[0][offset:offset+70])) data = DataLoader(ds, 64, shuffle=False, num_workers=4) %time for (x,y) in progress_bar(data): pass ```
github_jupyter
# T81-558: Applications of Deep Neural Networks **Module 4: Training for Tabular Data** * Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # Module 4 Material * **Part 4.1: Encoding a Feature Vector for Keras Deep Learning** [[Video]](https://www.youtube.com/watch?v=Vxz-gfs9nMQ&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_04_1_feature_encode.ipynb) * Part 4.2: Keras Multiclass Classification for Deep Neural Networks with ROC and AUC [[Video]](https://www.youtube.com/watch?v=-f3bg9dLMks&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_04_2_multi_class.ipynb) * Part 4.3: Keras Regression for Deep Neural Networks with RMSE [[Video]](https://www.youtube.com/watch?v=wNhBUC6X5-E&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_04_3_regression.ipynb) * Part 4.4: Backpropagation, Nesterov Momentum, and ADAM Neural Network Training [[Video]](https://www.youtube.com/watch?v=VbDg8aBgpck&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_04_4_backprop.ipynb) * Part 4.5: Neural Network RMSE and Log Loss Error Calculation from Scratch [[Video]](https://www.youtube.com/watch?v=wmQX1t2PHJc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_04_5_rmse_logloss.ipynb) # Part 4.1: Encoding a Feature Vector for Keras Deep Learning Neural networks can accept many types of data. We will begin with tabular data, where there are well defined rows and columns. This is the sort of data you would typically see in Microsoft Excel. An example of tabular data is shown below. Neural networks require numeric input. This numeric form is called a feature vector. Each row of training data typically becomes one vector. 
The individual input neurons each receive one feature (or column) from this vector. In this section, we will see how to encode the following tabular data into a feature vector. ``` import pandas as pd df = pd.read_csv( "https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv", na_values=['NA','?']) display(df[0:5]) ``` The following observations can be made from the above data: * The target column is the column that you seek to predict. There are several candidates here. However, we will initially use product. This field specifies what product someone bought. * There is an ID column. This column should not be fed into the neural network as it contains no information useful for prediction. * Many of these fields are numeric and might not require any further processing. * The income column does have some missing values. * There are categorical values: job, area, and product. To begin with, we will convert the job code into dummy variables. ``` dummies = pd.get_dummies(df['job'],prefix="job") print(dummies.shape) display(dummies[0:10]) ``` Because there are 33 different job codes, there are 33 dummy variables. We also specified a prefix, because the job codes (such as "ax") are not that meaningful by themselves. Something such as "job_ax" also tells us the origin of this field. Next, we must merge these dummies back into the main data frame. We also drop the original "job" field, as it is now represented by the dummies. ``` df = pd.concat([df,dummies],axis=1) df.drop('job', axis=1, inplace=True) display(df[0:10]) ``` We also introduce dummy variables for the area column. ``` df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1) df.drop('area', axis=1, inplace=True) display(df[0:10]) ``` The last remaining transformation is to fill in missing income values. ``` med = df['income'].median() df['income'] = df['income'].fillna(med) ``` There are more advanced ways of filling in missing values, but they require more analysis. 
The idea would be to see if another field might give a hint as to what the income were. For example, it might be beneficial to calculate a median income for each of the areas or job categories. This is something to keep in mind for the class Kaggle competition. At this point, the Pandas dataframe is ready to be converted to Numpy for neural network training. We need to know a list of the columns that will make up *x* (the predictors or inputs) and *y* (the target). The complete list of columns is: ``` print(list(df.columns)) ``` This includes both the target and predictors. We need a list with the target removed. We also remove **id** because it is not useful for prediction. ``` x_columns = df.columns.drop('product').drop('id') print(list(x_columns)) ``` ### Generate X and Y for a Classification Neural Network We can now generate *x* and *y*. Note, this is how we generate y for a classification problem. Regression would not use dummies and would simply encode the numeric value of the target. ``` # Convert to numpy - Classification x_columns = df.columns.drop('product').drop('id') x = df[x_columns].values dummies = pd.get_dummies(df['product']) # Classification products = dummies.columns y = dummies.values ``` We can display the *x* and *y* matrices. ``` print(x) print(y) ``` The x and y values are now ready for a neural network. Make sure that you construct the neural network for a classification problem. Specifically, * Classification neural networks have an output neuron count equal to the number of classes. * Classification neural networks should use **categorical_crossentropy** and a **softmax** activation function on the output layer. ### Generate X and Y for a Regression Neural Network For a regression neural network, the *x* values are generated the same. However, *y* does not use dummies. Make sure to replace **income** with your actual target. 
``` y = df['income'].values ``` # Module 4 Assignment You can find the first assignment here: [assignment 4](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)
github_jupyter
### Introduction This script finds the optimal band gaps of mechanical stack III-V-Si solar cells. I uses a detailed balance approach to calculate the I-V of individual subcells. For calculating efficiency, I add up the maximum power of individual subcell and divide it by the total illumination power. Details of how the I-V is calculated can be referred to [this paper](http://arxiv.org/abs/1512.02056). ``` %matplotlib inline import numpy as np from scipy.interpolate import interp2d import matplotlib.pyplot as plt from scipy.io import savemat from iii_v_si import calc_2j_si_eta, calc_2j_si_eta_direct from detail_balanced_MJ import calc_1j_eta def vary_top_eg(top_cell_qe,n_s=1): topcell_eg = np.linspace(0.9, 3, num=100) eta = np.zeros(topcell_eg.shape) for p in range(topcell_eg.shape[0]): eta[p] = calc_2j_si_eta_direct(top_eg=topcell_eg[p], top_rad_eta=1, top_qe=top_cell_qe, bot_rad_eta=1, bot_qe=1, n_s=n_s, mj="MS") print("At AM1.5g, direct band gap assumption of silicon") print("max eta %s:" % eta.max()) print("optimal Eg: %s" % topcell_eg[eta.argmax()]) return topcell_eg,eta ``` ### Assume that the top cell has 100% EQE ``` eg1,eta1=vary_top_eg(1) plt.plot(eg1,eta1) plt.xlabel("top cell's band gap") plt.ylabel("efficiency") plt.savefig("mstopeg.pdf") ``` The maximum efficiency is then **42%**, and the optimal band gap is **1.81 eV**. For two-terminal, 2J devices, maximum efficiency is **41%** with a **1.74-eV** top cell on silicon. As we can see, using mechanical stack did not benefit the efficiency fundamentally. ### Try if different EQE values shift the peak ``` qe_range=np.linspace(0.5,1,num=3) for q in qe_range: eg,eta = vary_top_eg(q) plt.plot(eg,eta,hold=True,label="QE=%s"%q) plt.legend(loc="best") plt.xlabel("top cell's band gap") plt.ylabel("efficiency") ``` Different top cell's EQEs do not change the optimal band gap of the top cell, as expected. 
### Assume that the top cell has very low EQE ``` eg1,eta1=vary_top_eg(0.001) plt.plot(eg1,eta1) plt.xlabel("top cell's band gap") plt.ylabel("efficiency") ``` The maximum efficiency in this case is around 30%, which should be very close to the limiting efficiency of a 1J silicon cell (with a nearly transparent top cell, only the silicon bottom cell contributes). We can check: ``` # calculate the SQ-limit efficiency of silicon eta = calc_1j_eta(eg=1.12, qe=1, r_eta=1, cell_temperature=300) print(eta) ``` The SQ-limit efficiency is 30%, which is close to the efficiency of the 2J mechanical stack cell with a nearly transparent top cell. ``` ```
github_jupyter
# Import Packages ``` import warnings warnings.filterwarnings("ignore") #Basic import pandas as pd import gensim import nltk import re import numpy as np import math import rpy2.robjects as robjects from rpy2.robjects import pandas2ri import rpy2.robjects.packages as rpackages #For Visualization import matplotlib.pyplot as plt import seaborn as sns sns.set() #For Data Preparation import itertools from itertools import combinations #For Class Definition from sklearn.base import BaseEstimator, TransformerMixin ``` # Import Data ``` df = pd.read_csv('C:/Users/Louis Owen/Desktop/ICoDSA 2020/Dataset/df_prepared.csv') ``` # Data Cleaning ## Basic ``` df.head(2) df.info() ``` ## Missing Values Analysis ``` def missing_values(df): ''' Function to check features with missing values ''' missing_values_feat=[] for column in df.columns: if df[column].isnull().values.any(): missing_values_feat.append(column) return(missing_values_feat) missing_values(df) len(df) df[['AFINN_max_score']].describe() df[pd.isnull(df.AFINN_max_score)].head() ``` Remove rows with nan affin max score, because the clean text is not proper. ``` missing_values(df[~pd.isnull(df.AFINN_max_score)]) df[['SentiWordNet_max_score']].describe() df[pd.isnull(df.SentiWordNet_max_score)].head() ``` Impute 0 for missing values, because the missing value is because no such word in the sentence that belongs to the sentiwordnet synsets and becaue 0 is neutral ``` df[['SentiWordNet_min_score']].describe() df[pd.isnull(df.SentiWordNet_min_score)].head() ``` Impute 0 for missing values, because the missing value is because no such word in the sentence that belongs to the sentiwordnet synsets and becaue 0 is neutral ``` df[['Avg_TFIDF_1-grams']].describe() df[pd.isnull(df['Avg_TFIDF_1-grams'])] ``` Impute 0 for all tf-idf missing values, because the missing value is because no such word which pass the n-gram filter and 0 is the neutral value ## Missing Values Imputation Missing Value Actionable Items: 1. 
Remove rows with NaN [AFINN_max_score], because the clean text is not proper. 2. Impute 0 for [SentiWordNet_max_score] missing values, because a missing value means no word in the sentence belongs to the SentiWordNet synsets, and 0 is neutral 3. Impute 0 for [SentiWordNet_min_score] missing values, because a missing value means no word in the sentence belongs to the SentiWordNet synsets, and 0 is neutral 4. Impute 0 for all TF-IDF missing values, because a missing value means no word passed the n-gram filter, and 0 is the neutral value

```
# Drop rows whose AFINN max score could not be computed (improper clean text).
df = df[~pd.isnull(df.AFINN_max_score)]

# Neutral imputation: 0 is the neutral value for both the SentiWordNet
# lexicon scores and the average TF-IDF features.
for col in ['SentiWordNet_max_score', 'SentiWordNet_min_score',
            'Avg_TFIDF_1-grams', 'Avg_TFIDF_2-grams',
            'Avg_TFIDF_3-grams', 'Avg_TFIDF_4-grams']:
    df[col] = df[col].fillna(0)

df.info()
```

## Final Touch

```
# Split the raw timestamp into its date and time-of-day components.
df['created_date'] = df['created_at'].apply(lambda x: pd.to_datetime(x).date())
df['created_time'] = df['created_at'].apply(lambda x: pd.to_datetime(x).time())


def _time_bucket(t):
    '''Map a time-of-day to one of 7 intraday one-hour buckets.

    Bucket 1 covers everything before 09:30 as well as 15:30 onwards;
    buckets 2-7 are the successive one-hour windows in between.
    Same mapping as the original nested-ternary expression.
    '''
    boundaries = [('09:30:00', 1), ('10:30:00', 2), ('11:30:00', 3),
                  ('12:30:00', 4), ('13:30:00', 5), ('14:30:00', 6),
                  ('15:30:00', 7)]
    for boundary, bucket in boundaries:
        if t < pd.to_datetime(boundary).time():
            return bucket
    return 1  # 15:30 onwards wraps back to bucket 1


df['time'] = df['created_time'].apply(_time_bucket)

# Keep only the engineered feature columns, in a fixed order.
# (Fixed: a stray `d` token after 'base_text' made this a SyntaxError.)
df = df[['created_date', 'time', 'clean_text', 'base_text',
         'POS_VB', 'POS_VBD', 'POS_VBG', 'POS_VBN', 'POS_VBP', 'POS_VBZ',
         '+num', '-num', '+num%', '-num%', 'word_num', 'ordinal_num',
         'num-num', 'num-num%', 'num-num-num', 'num/num', 'only_number',
         'call_+num%', 'put_+num%', 'put_-num%', 'bull', 'bear',
         'number_of_!', 'number_of_?', 'number_of_$',
         'continous_!', 'continous_?',
         'AFINN_sum_score', 'AFINN_max_score', 'AFINN_min_score',
         'AFINN_pos_ratio', 'AFINN_neg_ratio',
         'BingLiu_pos_ratio', 'BingLiu_neg_ratio',
         'NRC_Hashtag_sum_score', 'NRC_Hashtag_max_score',
         'NRC_Hashtag_min_score', 'NRC_Hashtag_pos_ratio',
         'NRC_Hashtag_neg_ratio',
         'SentiWordNet_sum_score', 'SentiWordNet_max_score',
         'SentiWordNet_min_score', 'SentiWordNet_pos_ratio',
         'SentiWordNet_neg_ratio',
         'Avg_TFIDF_1-grams', 'Avg_rf_1-grams',
         'Avg_TFIDF_2-grams', 'Avg_rf_2-grams',
         'Avg_TFIDF_3-grams', 'Avg_rf_3-grams',
         'Avg_TFIDF_4-grams', 'Avg_rf_4-grams',
         'caps_word', 'hashtags', 'PMI_score']]

df
```

# Export

```
df.to_csv('C:/Users/Louis Owen/Desktop/ICoDSA 2020/SENN/Dataset/Final/df_stocktwits_prepared_final.csv',index=False)
```
github_jupyter
### Please read the 'Model Test' section in `verifyml/DEVELOPMENT.md` before going through this notebook. Toy example that shows how a new model test can be created and used. ``` """ListLength test - test passed if the length of a given list is greater than a specified threshold""" from __future__ import annotations from dataclasses import dataclass, field import matplotlib.pyplot as plt # relative imports can also be used if this is saved as a standalone file from verifyml.model_tests.ModelTest import ModelTest from verifyml.model_tests.utils import plot_to_str # converts plots to base64-encoded strings @dataclass class ListLength(ModelTest): """ A test to check that a given list contains more elements than a specified threshold. """ input_list: list[int] threshold: int # optional: stores plots to be displayed on the Model Card plots: dict[str, str] = field(repr=False, default_factory=dict) # optional test_name: str = 'A list length test' test_desc: str = 'A list length test description' def plot(self, save_plots: bool = True) -> None: """Plot the input list with matplotlib and save it as a base64 encoded string if save_plots is True. 
""" fig, ax = plt.subplots() # show the plot ax.plot(self.input_list) # optionally save the plot to the instance if save_plots: self.plots['my plot name'] = plot_to_str() def run(self) -> bool: """Runs test by checking if len(input_list) > threshold""" self.result = len(self.input_list) self.passed = self.result > self.threshold return self.passed ``` ## Demo Model Card that uses the newly defined test above ### Init 2 ListLength tests - 1 that passes, 1 that fails ``` # set threshold at 4 - tests only pass if list length > 4 threshold = 4 input_list_pass = [1, 2, 3, 4, 5] input_list_fail = [1, 2, 3, 4] list_length_test_pass = ListLength(input_list_pass, threshold) list_length_test_fail = ListLength(input_list_fail, threshold) # run tests and plot results, saving the plots in the process list_length_test_pass.run() list_length_test_fail.run() list_length_test_pass.plot() list_length_test_fail.plot() ``` ### Create a Model Card and attach the tests to it ``` import verifyml.model_card_toolkit as mctlib # init model card toolkit and model card mct = mctlib.ModelCardToolkit() mc = mct.scaffold_assets() # init model card test objects that will hold the tests mc_test_pass, mc_test_fail = mctlib.Test(), mctlib.Test() # assign the list length tests to them mc_test_pass.read_model_test(list_length_test_pass) mc_test_fail.read_model_test(list_length_test_fail) # create a fairness report with these as fairness tests fairness_report = mctlib.FairnessReport( type="Fairness report containing list length tests", tests=[mc_test_pass, mc_test_fail] ) # add the report into the model card mc.fairness_analysis.fairness_reports = [fairness_report] # update the model card's name mc.model_details.name = "demo model" # update the model card assets with this new information mct.update_model_card(mc) ``` ### Export Model Card HTML into a file and also display it ``` from IPython import display html = mct.export_format(output_file="list_length.html") display.display(display.HTML(html)) ```
github_jupyter
# Water heating An insulated, rigid tank contains 4 kg of water at 100 kPa, where initially 0.25 of the mass is liquid. An electric heater turns on and operates until all of the liquid has vaporized. (Neglect the heat capacity of the tank and heater.) ![Water heater](../../images/water-heater.png) **Problem:** - Determine the final temperature and pressure of the water. - Determine the electrical work required by this process. - Determine the total change in entropy associated with this process. - Plot the state points for the water on a temperature-specific entropy diagram. First, load the necessary modules and specify the known/initial conditions. ``` import matplotlib.pyplot as plt %matplotlib inline import numpy as np import cantera as ct import matplotlib_inline.backend_inline matplotlib_inline.backend_inline.set_matplotlib_formats('pdf', 'png') plt.rcParams['figure.dpi']= 150 plt.rcParams['savefig.dpi'] = 150 from pint import UnitRegistry ureg = UnitRegistry() Q_ = ureg.Quantity mass = Q_(4, 'kg') pressure_initial = Q_(100, 'kPa') quality_initial = 0.25 quality_final = 1.0 # specify the initial state using pressure and quality state_initial = ct.Water() state_initial.PQ = pressure_initial.to('Pa').magnitude, quality_initial state_initial() ``` ## Find final temperature and pressure Due to conservation of mass, since the mass and volume of the system are fixed, the specific volume and density must be constant: $$ v_2 = v_1 \\ \rho_2 = \rho_1 $$ Therefore the final state is fixed by the density and quality, where $x_2 = 1$: ``` state_final = ct.Water() state_final.DQ = state_initial.density, quality_final ``` Hmm, what happened here? It looks like Cantera unfortunately does not support specifying the thermodynamic state using density and quality. (With quality as one property, it only supports temperature or pressure as the other property.) 
Fortunately, CoolProp *does* support specifying the state the way we need to solve this problem, so let's use that for the final state: ``` from CoolProp.CoolProp import PropsSI temp_final = PropsSI( 'T', 'D', state_initial.density, 'Q', quality_final, 'water' ) * ureg.kelvin pres_final = PropsSI( 'P', 'D', state_initial.density, 'Q', quality_final, 'water' ) * ureg.pascal print(f'Final temperature: {temp_final: .2f}') print(f'Final pressure: {pres_final: .2f}') # We can then set the final state using the Cantera object, # now that we know temperature state_final = ct.Water() state_final.TQ = temp_final.magnitude, quality_final ``` ## Find electrical work required To find the work required, we can do an energy balance on the (closed) system: \begin{equation} W_{\text{in}} = m (u_2 - u_1) \end{equation} ``` work = mass * (Q_(state_final.u, 'J/kg') - Q_(state_initial.u, 'J/kg')) print(f'Electrical work required: {work.to(ureg.megajoule): .2f}') ``` ## Find entropy change The total entropy change is the change in entropy of the system plus that of the surroundings: $$ \Delta S_{\text{total}} = \Delta S_{\text{system}} + \Delta S_{\text{surr}} \\ \Delta S_{\text{total}} = \Delta S_{\text{system}} = m (s_2 - s_1) $$ since the entropy change of the surroundings is zero. ``` entropy_change = mass * (Q_(state_final.s, 'J/kg') - Q_(state_initial.s, 'J/kg')) print(f'Entropy change: {entropy_change: .2f}') ``` This process is irreversible, associated with a positive increase in total entropy. 
## Plot the state points for water

We can construct the saturated liquid and saturated vapor lines in a temperature–specific entropy diagram (T–s diagram), and then plot the initial and final states locations along with the process line (of constant density):

```
def _saturation_entropy(temperature, quality):
    '''Entropy of water at the given temperature and vapor quality
    (quality 0.0 = saturated liquid line, 1.0 = saturated vapor line).'''
    w = ct.Water()
    w.TQ = temperature, quality
    return w.s


def _constant_density_entropy(temperature, density):
    '''Entropy of water at the given temperature and fixed density,
    used to trace the constant-density process line.'''
    w = ct.Water()
    w.TD = temperature, density
    return w.s


f = ct.Water()

# Temperatures from just above the fluid's minimum temperature
# up to the critical temperature
temps = np.arange(np.ceil(f.min_temp) + 0.15, f.critical_temperature, 1.0)

# Entropy values along the saturated-liquid and saturated-vapor lines
entropies_f = np.array([_saturation_entropy(T, 0.0) for T in temps])
entropies_g = np.array([_saturation_entropy(T, 1.0) for T in temps])

# critical point
f.TP = f.critical_temperature, f.critical_pressure

fig, ax = plt.subplots(figsize=(5, 3))

# Saturated liquid line, critical point, and saturated vapor line
ax.plot(entropies_f, temps)
ax.plot(entropies_g, temps)
ax.plot(f.s, f.T, 'o')
plt.xlabel('Specific entropy (J/kg⋅K)')
plt.ylabel('Temperature (K)')

# Initial and final states, with labels
ax.plot(state_initial.s, state_initial.T, 's')
ax.annotate('(1)', xy=(state_initial.s, state_initial.T),
            xytext=(0, -20), textcoords='offset points',
            ha='right', va='bottom'
            )
ax.plot(state_final.s, state_final.T, 's')
ax.annotate('(2)', xy=(state_final.s, state_final.T),
            xytext=(20, 0), textcoords='offset points',
            ha='right', va='bottom'
            )

# Process line of constant density between states (1) and (2)
temps = np.arange(state_initial.T, state_final.T, 1.0)
entropies = np.array([_constant_density_entropy(T, state_initial.density)
                      for T in temps])
ax.plot(entropies, temps, '--')

plt.grid(True)
fig.tight_layout()
plt.show()
```
github_jupyter
``` %load_ext autoreload %autoreload 2 import numpy as np import seaborn as sns import itertools import matplotlib as mpl import matplotlib.pyplot as plt rc={'font.size': 10, 'axes.labelsize': 10, 'legend.fontsize': 10.0, 'axes.titlesize': 32, 'xtick.labelsize': 20, 'ytick.labelsize': 16} plt.rcParams.update(**rc) mpl.rcParams['axes.linewidth'] = .5 #set the value globally rc={'font.size': 8} #, 'axes.labelsize': 10, 'legend.fontsize': 10.0, # 'axes.titlesize': 32, 'xtick.labelsize': 20, 'ytick.labelsize': 16} plt.rcParams.update(**rc) plt_params = {'mathtext.default': 'regular' } plt_params.update(rc) plt.rcParams.update(plt_params) import torch from datasets import load_dataset, load_metric from transformers import AutoTokenizer, AutoModelForSequenceClassification, BertForSequenceClassification import tqdm as tqdm import os import torch import pickle import pandas as pd import inspect from transformers import ( DataCollatorWithPadding, Trainer,EvalPrediction,InputFeatures) from utils import set_up_dir # './*/all_flips_{}.p' is the output of {SST/IMDB}/run_{SST/IMDB}.py # SST data_sst = {} dataset_name = 'sst' for flip_case in ['generate', 'pruning']: root_dir = 'all_flips_{}_{}.p' root_dir_flow = 'all_flips_{}_{}.p' pfile = root_dir.format(flip_case, dataset_name) res_ = pickle.load(open(pfile, 'rb')) data_sst[flip_case] = res_ data_sets = {'sst':data_sst} from matplotlib import pyplot as plt from plot_utils import plot_flips params = { 'gi': ('#cc0000','-','GI'), 'lrp_detach_KQ_LNorm_Norm': ('#6495ED','-',r'LRP (AH+LN)'), 'lrp_detach_KQ': ('#6495ED','--',r'LRP (AH)'), 'rollout_2': ('#d400e7','dotted','Rollout'), 'GAE': ('#cd00cd','solid','Grad-Rollout'), 'attn_last': ('#F28500','solid','A-Last'), 'random': ('#4c4c4c','solid','Random')} flip_order = ['random', 'attn_last', 'rollout_2', 'GAE', 'gi', 'lrp_detach_KQ', 'lrp_detach_KQ_LNorm_Norm', ] save_dir = 'experiments/2502021_evaluation_revised' set_up_dir(save_dir) imfile = 
os.path.join(save_dir,'sst_overview.pdf') plot_flips(data_sets['sst'], flip_order=flip_order,params=params, imfile=imfile) flip_order = ['random', 'attn_last', 'saliency', 'rollout_2', 'chefer', 'gi', 'lrp_detach_KQ', 'lrp_detach_KQ_LNorm_Norm', ] imfile = os.path.join(save_dir,'imdb_overview.pdf') plot_flips(data_sets['imdb'], flip_order, params, imfile=imfile) # def make table from scipy.integrate import trapz, simps table_order = ['random', 'attn_last', 'saliency', 'attention_flow_2', 'rollout_2', 'gi', 'chefer', 'lrp_detach_KQ', 'lrp_detach_KQ_LNorm_Norm', ] metrics = [('generate', 'E'), ('pruning', 'E'), ('pruning', 'M')] metric_map = {'generate_E': 'AUAC', 'pruning_E': 'AUPC', 'pruning_M':'AU-MSE'} method_map = {} for k,v in params.items(): method_map[k] = v[2] fracs = np.linspace(0.,1.,11) dataname_map = {'sst': 'SST-2', 'imdb':'IMDB'} df_all = {} for flip_case, metric in metrics: data_table = [] for dataset in ['imdb', 'sst']: data_ = data_sets[dataset] flip_data = data_[flip_case] data_col = [] #['names']+ [dataset]] for i,k in enumerate(table_order): if k not in flip_data: f = '-' else: v = flip_data[k] f_ = np.nanmean(v[metric], axis=0) f = trapz(f_, fracs) f = '{:0.3f}'.format(f) data_col.append(f) data_table.append(['\rotatebox{90}{'+dataname_map[dataset]+'}'] + data_col) df_metric = pd.DataFrame(np.array(data_table), columns =['names'] + [method_map[k_] for k_ in table_order]) metric_name= metric_map[flip_case + '_' + metric] df_metric = df_metric.set_index('names') df_metric df_all[metric_name] = df_metric def _recover_control_sequences(s: str) -> str: s = s.replace(r"\}", "}") s = s.replace(r"\{", "{") s = s.replace(r"textbackslash ", "") return s str_ = df_all['AUAC'].T.to_latex(index=True) print(_recover_control_sequences(str_)) str_ = df_all['AUPC'].T.to_latex(index=True) print(_recover_control_sequences(str_)) str_ = df_all['AU-MSE'].T.to_latex(index=True) print(_recover_control_sequences(str_)) from plot_utils import plot_conservation res = 
pickle.load(open('/home/oeberle/explainable_embeds/XNLP/experiments/26012022_sst_conservation/conservation.p', 'rb'))  # completes `res = ...` from the previous cell; NOTE(review): the file handle is never closed explicitly — a `with open(...)` block would be safer
# Render the conservation plot for SST and save it to `savefile`
savefile = 'experiments/conservation_sst.pdf'
plot_conservation(res, savefile)
# Inspect what the loaded conservation results contain
res.keys()
```
github_jupyter
``` from sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV, train_test_split from sklearn.metrics import precision_score, recall_score, f1_score, classification_report import pandas as pd import numpy as np from time import time from sklearn.preprocessing import MinMaxScaler from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC from sklearn.linear_model import SGDClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix from sklearn import metrics # WINDOW_SIZE = 257, CODEBOOK_SIZE = 10000 enable_norm = True X_train = np.loadtxt("feature_train.csv", delimiter = ",").reshape(-1,384) y_train = np.loadtxt("label_train.csv", delimiter = ",") X_test = np.loadtxt("feature_test.csv", delimiter=",").reshape(-1,384) y_test = np.loadtxt("label_test.csv", delimiter=",") if enable_norm: X_train = np.transpose(X_train) X_test = np.transpose(X_test) model_normalizer_horizontal = MinMaxScaler() model_normalizer_horizontal.fit(X_train) X_train = model_normalizer_horizontal.transform(X_train) model_normalizer_horizontal = MinMaxScaler() model_normalizer_horizontal.fit(X_test) X_test = model_normalizer_horizontal.transform(X_test) X_train = np.transpose(X_train) X_test = np.transpose(X_test) model_normalizer_vertical = MinMaxScaler() model_normalizer_vertical.fit(X_train) X_train = model_normalizer_vertical.transform(X_train) X_test = model_normalizer_vertical.transform(X_test) def plot_2d_space(X, y, label='Classes'): colors = ['#1F77B4', '#FF7F0E'] markers = ['o', 's'] for l, c, m in zip(np.unique(y), colors, markers): plt.scatter( X[y==l, 0], X[y==l, 1], c=c, label=l, marker=m ) plt.title(label) plt.legend(loc='upper right') plt.show() import imblearn 
from imblearn.combine import SMOTETomek smt = SMOTETomek(sampling_strategy='auto') X_smt, y_smt = smt.fit_sample(X_train, y_train) plot_2d_space(X_smt, y_smt, 'SMOTE + Tomek links') from imblearn.under_sampling import ClusterCentroids cc = ClusterCentroids(sampling_strategy='auto') X_cc, y_cc = cc.fit_sample(X_train, y_train) plot_2d_space(X_cc, y_cc, 'Cluster Centroids under-sampling') from imblearn.under_sampling import TomekLinks tl = TomekLinks(sampling_strategy='majority') X_tl, y_tl = tl.fit_sample(X_train, y_train) plot_2d_space(X_tl, y_tl, 'Tomek links under-sampling') from imblearn.over_sampling import SMOTE smote = SMOTE(sampling_strategy='minority') X_sm, y_sm = smote.fit_sample(X_train, y_train) plot_2d_space(X_sm, y_sm, 'SMOTE over-sampling') X_train, y_train = X_smt, y_smt X_test, y_test = smt.fit_sample(X_test, y_test) label_names = ['Buggy', 'Correct'] def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True): """ given a sklearn confusion matrix (cm), make a nice plot Arguments --------- cm: confusion matrix from sklearn.metrics.confusion_matrix target_names: given classification classes such as [0, 1, 2] the class names, for example: ['high', 'medium', 'low'] title: the text to display at the top of the matrix cmap: the gradient of the values displayed from matplotlib.pyplot.cm see http://matplotlib.org/examples/color/colormaps_reference.html plt.get_cmap('jet') or plt.cm.Blues normalize: If False, plot the raw numbers If True, plot the proportions Usage ----- plot_confusion_matrix(cm = cm, # confusion matrix created by # sklearn.metrics.confusion_matrix normalize = True, # show proportions target_names = y_labels_vals, # list of names of the classes title = best_estimator_name) # title of graph Citiation --------- http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html """ import matplotlib.pyplot as plt import numpy as np import itertools accuracy = np.trace(cm) / 
float(np.sum(cm)) misclass = 1 - accuracy if cmap is None: cmap = plt.get_cmap('Blues') plt.figure(figsize=(8, 6)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() if target_names is not None: tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 1.5 if normalize else cm.max() / 2 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): if normalize: plt.text(j, i, "{:0.4f}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") else: plt.text(j, i, "{:,}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass)) plt.show() # LR param_grid_ = {'C': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000], "penalty":["l1","l2"]} print('-> Processing 10-Fold Cross Validation and Grid Search\n') bow_search = GridSearchCV(LogisticRegression(), cv=10, param_grid=param_grid_, scoring='f1_micro', n_jobs=-1, verbose=10) t0 = time() bow_search.fit(X_train, y_train) training_time = round(time()-t0, 3) print('-> Done! 
Show Grid scores\n') print(bow_search.cv_results_,'\n\n') print("Best parameters set found on development set:\n") print(bow_search.best_params_,'\n') print("Grid scores on development set:\n") means = bow_search.cv_results_['mean_test_score'] stds = bow_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, bow_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print('\n\n') print("Detailed classification report:\n") print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.\n\n") t0 = time() y_true, y_pred = y_test, bow_search.predict(X_test) test_time = round(time()-t0, 3) cmat = confusion_matrix(y_true, y_pred) plot_confusion_matrix(cm = cmat, normalize = False, target_names = label_names, cmap = plt.get_cmap('Blues'), title = "Confusion Matrix LR Dataset_Norm = %s" % str(enable_norm)) plot_confusion_matrix(cm = cmat, target_names = label_names, cmap = plt.get_cmap('Blues'), title = "Normalized Confusion Matrix LR Dataset_Norm = %s" % str(enable_norm)) print('\n\n') print(classification_report(y_true, y_pred)) print() print('Accuracy', metrics.accuracy_score(y_pred,y_test)) print("Training time : {}\n".format(training_time)) print("Test time : {}\n".format(test_time)) print() # NB parameters = {'alpha': (1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 10, 100, 1000)} bow_search = GridSearchCV(MultinomialNB(), parameters, cv=10, scoring='f1_micro', n_jobs=-1, verbose=10) t0 = time() bow_search.fit(X_train, y_train) training_time = round(time()-t0, 3) print('-> Done! 
Show Grid scores\n') print(bow_search.cv_results_,'\n\n') print("Best parameters set found on development set:\n") print(bow_search.best_params_,'\n') print("Grid scores on development set:\n") means = bow_search.cv_results_['mean_test_score'] stds = bow_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, bow_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print('\n\n') print("Detailed classification report:\n") print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.\n\n") t0 = time() y_true, y_pred = y_test, bow_search.predict(X_test) test_time = round(time()-t0, 3) print(confusion_matrix(y_true, y_pred)) print('\n\n') print(classification_report(y_true, y_pred)) print() print('Accuracy', metrics.accuracy_score(y_pred,y_test)) print("Training time : {}\n".format(training_time)) print("Test time : {}\n".format(test_time)) print() # SGD param_grid_ = [ {'alpha': [0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01]} ] bow_search = GridSearchCV(SGDClassifier(max_iter=2), cv=10, param_grid=param_grid_, scoring='f1_micro', n_jobs=-1, verbose=10) t0 = time() bow_search.fit(X_train, y_train) training_time = round(time()-t0, 3) print('-> Done! 
Show Grid scores\n') print(bow_search.cv_results_,'\n\n') print("Best parameters set found on development set:\n") print(bow_search.best_params_,'\n') print("Grid scores on development set:\n") means = bow_search.cv_results_['mean_test_score'] stds = bow_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, bow_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print('\n\n') print("Detailed classification report:\n") print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.\n\n") t0 = time() y_true, y_pred = y_test, bow_search.predict(X_test) test_time = round(time()-t0, 3) print(confusion_matrix(y_true, y_pred)) print('\n\n') print(classification_report(y_true, y_pred)) print() print('Accuracy', metrics.accuracy_score(y_pred,y_test)) print("Training time : {}\n".format(training_time)) print("Test time : {}\n".format(test_time)) print() # RF param_grid_ = {"min_samples_leaf": [2, 3, 5, 7, 10, 100, 250, 500, 1000]} bow_search = GridSearchCV(RandomForestClassifier(), param_grid=param_grid_, cv=10, scoring='f1_micro', n_jobs=-1, verbose=10) t0 = time() bow_search.fit(X_train, y_train) training_time = round(time()-t0, 3) print('-> Done! 
Show Grid scores\n') print(bow_search.cv_results_,'\n\n') print("Best parameters set found on development set:\n") print(bow_search.best_params_,'\n') print("Grid scores on development set:\n") means = bow_search.cv_results_['mean_test_score'] stds = bow_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, bow_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print('\n\n') print("Detailed classification report:\n") print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.\n\n") t0 = time() y_true, y_pred = y_test, bow_search.predict(X_test) test_time = round(time()-t0, 3) print(confusion_matrix(y_true, y_pred)) print('\n\n') print(classification_report(y_true, y_pred)) print() print('Accuracy', metrics.accuracy_score(y_pred,y_test)) print("Training time : {}\n".format(training_time)) print("Test time : {}\n".format(test_time)) print() # DT param_grid_ = {"min_samples_leaf": [2, 3, 5, 7, 10, 100, 250, 500, 1000]} bow_search = GridSearchCV(DecisionTreeClassifier(), param_grid=param_grid_, cv=10, scoring='f1_micro', n_jobs=-1, verbose=10) t0 = time() bow_search.fit(X_train, y_train) training_time = round(time()-t0, 3) print('-> Done! 
Show Grid scores\n') print(bow_search.cv_results_,'\n\n') print("Best parameters set found on development set:\n") print(bow_search.best_params_,'\n') print("Grid scores on development set:\n") means = bow_search.cv_results_['mean_test_score'] stds = bow_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, bow_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print('\n\n') print("Detailed classification report:\n") print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.\n\n") t0 = time() y_true, y_pred = y_test, bow_search.predict(X_test) test_time = round(time()-t0, 3) print(confusion_matrix(y_true, y_pred)) print('\n\n') print(classification_report(y_true, y_pred)) print() print('Accuracy', metrics.accuracy_score(y_pred,y_test)) print("Training time : {}\n".format(training_time)) print("Test time : {}\n".format(test_time)) print() # KNN param_grid_ = {'n_neighbors': [1], 'weights': ['uniform', 'distance'], 'metric': ['euclidean', 'manhattan']} bow_search = GridSearchCV(KNeighborsClassifier(), param_grid=param_grid_, cv=10, scoring='f1_micro', n_jobs=-1, verbose=10) t0 = time() bow_search.fit(X_train, y_train) training_time = round(time()-t0, 3) print('-> Done! 
Show Grid scores\n') print(bow_search.cv_results_,'\n\n') print("Best parameters set found on development set:\n") print(bow_search.best_params_,'\n') print("Grid scores on development set:\n") means = bow_search.cv_results_['mean_test_score'] stds = bow_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, bow_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print('\n\n') print("Detailed classification report:\n") print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.\n\n") t0 = time() y_true, y_pred = y_test, bow_search.predict(X_test) test_time = round(time()-t0, 3) print(confusion_matrix(y_true, y_pred)) print('\n\n') print(classification_report(y_true, y_pred)) print() print('Accuracy', metrics.accuracy_score(y_pred,y_test)) print("Training time : {}\n".format(training_time)) print("Test time : {}\n".format(test_time)) print() ```
github_jupyter
# Salary recommandation using a score Author: Florian Gauthier The purpose of this Notebook is to find the best score in order to select the optimal salary change in the salary-based recommendation. The best score will be the one that allows the best compromise between a salary decrease and a raise in job offers, for every job groups & experience buckets. We first use the 10% sample (~1.2Go) dataset called `sample_10perc.csv` because the full dataset is too big. ``` import numpy as np import pandas as pd from pandas.tseries import offsets from paul_emploi.lib import cleaned_data from paul_emploi.modeling import salary_recommendation_model, job_offers_optimal_buckets import matplotlib.pyplot as plt from matplotlib import gridspec %matplotlib inline %load_ext autoreload %autoreload 2 ``` ## Import & clean job postings dataset Let's first `import_clean_job_offers` to import a clean database of job_offers. ``` etalab_path = "/var/data/bayes_impact/" postings = cleaned_data.job_offers(data_folder=etalab_path, filename_offers="job_offers/sample_10perc.csv") postings.head(2) ``` ## Filter postings Remove outliers & filter date. These modules will not be used in production since the filtering will be done before without pandas. ``` # Since our sample has data until early 2016, we use for now SMIC # 2015 to run some tests. As a maximum salary, we use 50.000€ (which is # arbitrary.) # SMIC amounts available here: http://www.salairebrutnet.fr/ SMIC_2015 = 17496 SMIC_2016 = 17604 _MIN_ANNUAL_SALARY = SMIC_2015 # Arbitrary. _MAX_ANNUAL_SALARY = 50000 def filter_postings( table_offers, min_salary=_MIN_ANNUAL_SALARY, max_salary=_MAX_ANNUAL_SALARY, nb_months=6): """Returns a table containing only job offers that fulfill specific criteria. It will be used for salary recommendations. Args : table_offers: pandas DataFrame containing job postings. min_salary (int.): exclude all salary under min_salary (ex : SMIC). max_salary (int.): exclude all salary above max_salary (ex: 50000). 
nb_months: number of months to look back to get job offers. Returns: a pandas DataFrame containing only job offers that fulfill specific criteria. It will be used for salary recommendations. """ clean_postings = table_offers.copy() # Exclude outliers. clean_postings = _filter_outliers( clean_postings, min_salary, max_salary) # Grab every offers until nb_months before. if not clean_postings.empty: # Or it will crash. clean_postings = _filter_date(clean_postings, nb_months) # Check if table is not empty. if clean_postings.empty: raise ValueError('No job offers match the given criteria.') interesting_cols = [ 'rome_id', 'annual_minimum_salary', 'experience_min_duration'] return clean_postings[interesting_cols] def _filter_date(table_offers, nb_months): """ Keep only the latest nb_months of the postings dataset. Args : table_offers: pandas DataFrame containing job postings. nb_months (int.): number of months to look back to get job offers. Returns: pandas DataFrame contaning offers of the latest nb_months of table_offers. """ table_offers.set_index(pd.DatetimeIndex( table_offers.date_debut_imputed), inplace=True) end_date = table_offers.date_debut_imputed.max() start_date = (end_date - offsets.DateOffset(months=nb_months)).strftime( '%Y-%m-%d') date_mask = (table_offers['date_debut_imputed'] >= start_date) return table_offers.loc[date_mask].reset_index(drop=True) def _filter_outliers( table_offers, min_salary, max_salary): """Returns a table containing only job offers without outliers. Args : table_offers: pandas DataFrame containing job postings. min_salary (int.): exclude all salary under min_salary (ex : SMIC). max_salary (int.): exclude all salary above max_salary (ex: 50000). Returns: pandas DataFrame containing only job offers with a salary between [min_salary, max_salary[. 
""" valid_salary_mask = (table_offers.annual_minimum_salary >= min_salary) & ( table_offers.annual_minimum_salary < max_salary) return table_offers.loc[valid_salary_mask] clean_postings = filter_postings(postings) clean_postings.head(2) ``` ### Make experience buckets ``` # Select some job groups (some contaning offers, other without so many) job_groups_examples = [ 'G1602', 'K2204', 'K1303', 'K1304', 'G1803', 'A1202', 'B1401'] postings_example = clean_postings.loc[clean_postings.rome_id.isin(job_groups_examples)] # Bucketize postings_example_bucketized = postings_example.groupby('rome_id').apply( job_offers_optimal_buckets.apply_bucketize) postings_example_bucketized.head(5) ``` ### Let's create 2 small subsets Each of them contains a single `exp_bucket` of a `rome_id`. ``` # Make a dataset contaning only 1 job group & 1 experience bucket postings_mask_1 = (postings_example_bucketized.rome_id == 'G1602') &\ (postings_example_bucketized.exp_bucket == '[0, 1[') postings_mask_2 = (postings_example_bucketized.rome_id == 'G1803') &\ (postings_example_bucketized.exp_bucket == '[0, 1[') postings_1 = postings_example_bucketized.loc[postings_mask_1] postings_2 = postings_example_bucketized.loc[postings_mask_2] postings_2.head(2) ``` ### Cumulative counts For each salary, we count the number of offers available. ``` num_offers_with_higher_salary = salary_recommendation_model._compute_job_offers_salary(postings_1) num_offers_with_higher_salary.head(2) ``` ## Chosing the best score We use the following modules to compute graphs on score. 
``` def compute_result_as_df(table_offers, score_label='sqrtO_salary'): """ Compute every metrics of the score and store them in a pandas Dataframe """ num_offers_with_higher_salary = salary_recommendation_model._compute_job_offers_salary( table_offers) cumul_offers = num_offers_with_higher_salary.reset_index() def _scoring(idx): return _apply_score(num_offers_with_higher_salary, idx, score_label=score_label) result_as_df = pd.DataFrame(cumul_offers.reset_index()['index'].apply(_scoring).tolist()) result_as_df = pd.concat([cumul_offers, result_as_df], axis=1) return result_as_df def _apply_score( num_offers_with_higher_salary, idx, score_label): """ Calculate a score to each salaries of table_offers, maximize it and return the amount of gained offers for the optimal decrease of salary + additional metrics to compute score comparison. Args: num_offers_with_higher_salary: Pandas Series containing the amount of job offers (value) by salary (index). idx: the index of the salary on which to compute the score. score_label: label of the score we decided to compute. Returns: a dictionnary containing all the metrics """ # Cumulative count. cumul_offers = num_offers_with_higher_salary.reset_index() if idx == 0: return _fill_dict_of_res(0, cumul_offers.annual_minimum_salary.iloc[idx], 0, 0) delta_salaries = salary_recommendation_model._compute_delta_from_index( cumul_offers.annual_minimum_salary, idx) delta_offers = salary_recommendation_model._compute_delta_from_index( cumul_offers.num_offers_with_higher_salary, idx) # Compute score. scores = _compute_scores(delta_offers, delta_salaries, score_label) # Best score = max(score). idx_max_score = scores.idxmax() # Compute results. 
final_num_offers = cumul_offers.num_offers_with_higher_salary.iloc[idx_max_score] final_salary = cumul_offers.annual_minimum_salary.iloc[idx_max_score] gained_offers = delta_offers.iloc[idx_max_score] decrease_of_salary = delta_salaries.iloc[idx_max_score] return _fill_dict_of_res(idx_max_score, final_salary, decrease_of_salary, gained_offers) def _fill_dict_of_res(idx_max_score, final_salary, decrease_of_salary, gained_offers): return { 'idx_max_score': idx_max_score, 'final_salary': final_salary, 'decrease_of_salary': decrease_of_salary, 'gained_offers': gained_offers} def _compute_scores(offers, delta_salaries, score_label): """Compute different scores NOTE: 'O' stands for 'delta(O)' and 'S' stands for 'delta(S)', with 'delta(O)' being the variation of number of offers and 'delta(S)' the respective variation of salary. """ if score_label == 'sqrt(O)_S': score = np.sqrt(offers) / (delta_salaries) if score_label == 'sqrt(O)_S²': score = np.sqrt(offers) / (delta_salaries ** 2) if score_label == 'log(O)_S': score = np.log(offers) / (delta_salaries) if score_label == 'log(O)_S²': score = np.log(offers) / (delta_salaries ** 2) if score_label == 'O_S²': score = offers / (delta_salaries ** 2) return score table_offers = postings_1.copy() #table_offers = postings_2.copy() # Output example result_as_df = compute_result_as_df(table_offers=table_offers, score_label='sqrt(O)_S') result_as_df.head(2) ``` ## Score comparaison We're going to compare scores relatively to 3 metrics : * Recommended salaries * Recommended decrease of salary * Gained offers ### Recommended salaries * x-axis: salary indexes in ascending values (0: the lowest salary) * y-axis: recommended salary indexes (also in ascending values). 
```

# Score labels handled by _compute_scores, and one plot colour per score.
score_label_list = ['sqrt(O)_S', 'sqrt(O)_S²', 'log(O)_S', 'log(O)_S²', 'O_S²']
col_list = ['blue', 'green', 'red', 'darkturquoise', 'purple']

# One subplot per score: recommended salary index against salary index.
# NOTE(review): `gridspec` is imported outside this excerpt — presumably
# matplotlib.gridspec; confirm against the notebook header.
fig = plt.figure(figsize=(15, 15))
gs = gridspec.GridSpec(len(score_label_list), 1)
num_fig = 0
num_col = 0
for score_label in score_label_list:
    ax = plt.subplot(gs[num_fig])
    result_as_df = compute_result_as_df(table_offers=table_offers, score_label=score_label)
    result_as_df.idx_max_score.plot(label=score_label, ax=ax, color=col_list[num_col])
    plt.xlabel('Salary indexes (ascending values)')
    plt.ylabel('Recommended salary indexes')
    plt.legend(loc='upper left')
    num_fig += 1
    num_col += 1
```

### Recommended decrease of salary

* x-axis: annual_minimum salary in ascending values
* y-axis: recommended decrease of salary

```
# One subplot per score: recommended decrease of salary, indexed by salary.
fig = plt.figure(figsize=(15, 15))
gs = gridspec.GridSpec(len(score_label_list), 1)
num_fig = 0
num_col = 0
for score_label in score_label_list:
    ax = plt.subplot(gs[num_fig])
    result_as_df = compute_result_as_df(table_offers=table_offers, score_label=score_label)
    result_as_df.set_index('annual_minimum_salary').decrease_of_salary.plot(
        label=score_label, ax=ax, color=col_list[num_col])
    plt.ylabel('Recommended decrease of salary')
    plt.legend(loc='upper left')
    num_fig += 1
    num_col += 1
```

### Gained offers

* x-axis: annual_minimum salary in ascending values
* y-axis: Gained offers in percent.

```
# One subplot per score: gained offers (in %), indexed by salary.
fig = plt.figure(figsize=(15, 15))
gs = gridspec.GridSpec(len(score_label_list), 1)
num_fig = 0
num_col = 0
for score_label in score_label_list:
    ax = plt.subplot(gs[num_fig])
    result_as_df = compute_result_as_df(table_offers=table_offers, score_label=score_label)
    result_as_df.set_index('annual_minimum_salary').gained_offers.plot(label=score_label, ax=ax, color=col_list[num_col])
    plt.ylabel('Gained offers (in %)')
    plt.legend(loc='upper left')
    num_col += 1
    num_fig += 1
```

## And the best score is....

* `sqrt(O)/S` seems to be the best score.
* `log(O)/S²` and `sqrt(O)/S²` both give too much weight to `salary variation`, so we end up always recommending the immediately preceding salary (see the first graphs on "Recommended salaries").
* `O/S²` gives too much weight to salary once the latter grows beyond a certain point: it then recommends far too large a decrease of salary.

NOTE: `O=delta(O)` is the variation of the number of offers and `S=delta(S)` the corresponding variation of salary.
github_jupyter
<center> <font size=5> <h1>Define working environment</h1> </font> </center>

The following cells are used to:
- Import needed libraries
- Set the environment variables for Python, Anaconda, GRASS GIS and R statistical computing
- Define the ["GRASSDATA" folder](https://grass.osgeo.org/grass73/manuals/helptext.html), and the names of the "location" and "mapset" in which you want to work.

**Import libraries**

```
## Import libraries needed for setting parameters of operating system
import os
import sys
```

<center> <font size=3> <h3>Environment variables when working on Linux Mint</h3> </font> </center>

**Set 'Python' and 'GRASS GIS' environment variables**

Here, we set [the environment variables allowing the use of GRASS GIS](https://grass.osgeo.org/grass64/manuals/variables.html) inside this Jupyter notebook. Please change the directory paths according to your own system configuration.

```
### Define GRASS GIS environment variables for LINUX UBUNTU Mint 18.1 (Serena)
# Check if environment variables exist, and create them (empty) if they do not.
if not 'PYTHONPATH' in os.environ:
    os.environ['PYTHONPATH']=''
if not 'LD_LIBRARY_PATH' in os.environ:
    os.environ['LD_LIBRARY_PATH']=''
# Set environmental variables
# GISBASE points to a locally-built GRASS trunk distribution; adapt to your machine.
os.environ['GISBASE'] = '/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu'
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'bin')
# NOTE(review): 'script' (singular) — GRASS distributions usually ship a 'scripts'
# directory; confirm the actual folder name in GISBASE.
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'script')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')
#os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')
# Make the GRASS Python packages importable.
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass')
os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass','script')
# This notebook targets Python 2.7.
os.environ['PYTHONLIB'] = '/usr/lib/python2.7'
os.environ['LD_LIBRARY_PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib')
os.environ['GIS_LOCK'] = '$$'
os.environ['GISRC'] = os.path.join(os.environ['HOME'],'.grass7','rc')
# Make user-installed GRASS add-ons callable.
# NOTE(review): the plain 'addons' entry is added twice — presumably harmless
# PATH duplication, but worth confirming.
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','bin')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons')
os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','scripts')

## Define GRASS-Python environment
sys.path.append(os.path.join(os.environ['GISBASE'],'etc','python'))
```

**Import GRASS Python packages**

```
## Import libraries needed to launch GRASS GIS in the jupyter notebook
import grass.script.setup as gsetup
## Import libraries needed to call GRASS using Python
import grass.script as gscript
from grass.script import core as grass
```

**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**

**Display current
environment variables of your computer**

```
## Display the current defined environment variables
# NOTE(review): Python 2 print-statement syntax — this notebook runs under
# Python 2.7 (see the PYTHONLIB setting in the environment cell).
for key in os.environ.keys():
    print "%s = %s \t" % (key,os.environ[key])
```

**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**

<center> <font size=5> <h1>Define functions</h1> </font> </center>

This section of the notebook is dedicated to defining functions which will then be called later in the script. If you want to create your own functions, define them here.

### Function for computing processing time

The "print_processing_time" is used to calculate and display the processing time for various stages of the processing chain. At the beginning of each major step, the current time is stored in a new variable, using [time.time() function](https://docs.python.org/2/library/time.html). At the end of the stage in question, the "print_processing_time" function is called and takes as argument the name of this new variable containing the recorded time at the beginning of the stage, and an output message.

```
## Import library for managing time in python
import time

## Function "print_processing_time()" compute processing time and printing it.
# The argument "begintime" wait for a variable containing the begintime (result of time.time()) of the process for which to compute processing time.
# The argument "printmessage" wait for a string format with information about the process.
def print_processing_time(begintime, printmessage): endtime=time.time() processtime=endtime-begintime remainingtime=processtime days=int((remainingtime)/86400) remainingtime-=(days*86400) hours=int((remainingtime)/3600) remainingtime-=(hours*3600) minutes=int((remainingtime)/60) remainingtime-=(minutes*60) seconds=round((remainingtime)%60,1) if processtime<60: finalprintmessage=str(printmessage)+str(seconds)+" seconds" elif processtime<3600: finalprintmessage=str(printmessage)+str(minutes)+" minutes and "+str(seconds)+" seconds" elif processtime<86400: finalprintmessage=str(printmessage)+str(hours)+" hours and "+str(minutes)+" minutes and "+str(seconds)+" seconds" elif processtime>=86400: finalprintmessage=str(printmessage)+str(days)+" days, "+str(hours)+" hours and "+str(minutes)+" minutes and "+str(seconds)+" seconds" return finalprintmessage ``` ### Function for creation of configuration file for r.li (landscape units provided as polygons) (multiprocessed) ``` ##### Function that create the r.li configuration file for a list of landcover raster. ### It enable to create in one function as many configuration file as the number of raster provided in 'listoflandcoverraster'. ### It could be use only in case study with a several landcover raster and only one landscape unit layer. ### So, the landscape unit layer if fixed and there are the landcover raster which change. # 'listoflandcoverraster' wait for a list with the name (string) of landcover rasters. # 'landscape_polygons' wait for the name (string) of the vector layer containing the polygons to be used as landscape units. # 'masklayerhardcopy' wait for a boolean value (True/False) depending if the user want to create hard copy of the landscape units mask layers or not. # 'returnlistpath' wait for a boolean value (True/False) according to the fact that a list containing the path to the configuration files is desired. 
# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization. # Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial # Function that copy the landscape unit raster masks on a new layer with name corresponding to the current 'landcover_raster' def copy_landscapeunitmasks(current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox,cat): ### Copy the landscape units mask for the current 'cat' # Define the name of the current "current_landscapeunit_rast" layer current_landscapeunit_rast=current_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat) base_landscapeunit_rast=base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat) # Copy the the landscape unit created for the first landcover map in order to match the name of the current landcover map gscript.run_command('g.copy', overwrite=True, quiet=True, raster=(base_landscapeunit_rast,current_landscapeunit_rast)) # Add the line to the text variable text="MASKEDOVERLAYAREA "+current_landscapeunit_rast+"|"+landscapeunit_bbox[cat] return text # Function that create the r.li configuration file for the base landcover raster and then for all the binary rasters def create_rli_configfile(listoflandcoverraster,landscape_polygons, masklayerhardcopy=False,returnlistpath=True,ncores=2): # Check if 'listoflandcoverraster' is not empty if len(listoflandcoverraster)==0: sys.exit("The list of landcover raster is empty and should contain at least one raster name") # Check if rasters provided in 'listoflandcoverraster' exists to avoid error in mutliprocessing for cur_rast in listoflandcoverraster: try: mpset=cur_rast.split("@")[1] except: mpset="" if cur_rast.split("@")[0] not in [x[0] for x in gscript.list_pairs(type='raster',mapset=mpset)]: sys.exit('Raster <%s> not found' %cur_rast) # Check if rasters provided in 'listoflandcoverraster' have 
the same extend and spatial resolution raster={} for x, rast in enumerate(raster_list): raster[x]=gscript.raster_info(rast) key_list=raster.keys() for x in key_list[1:]: for info in ('north','south','east','west','ewres','nsres'): if not raster[0][info]==raster[x][info]: sys.exit("Some raster provided in the list have different spatial resolution or extend, please check") # Get the version of GRASS GIS version=grass.version()['version'].split('.')[0] # Define the folder to save the r.li configuration files if sys.platform=="win32": rli_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li") else: rli_dir=os.path.join(os.environ['HOME'],".grass"+version,"r.li") if not os.path.exists(rli_dir): os.makedirs(rli_dir) ## Create an ordered list with the 'cat' value of landscape units to be processed. list_cat=[int(x) for x in gscript.parse_command('v.db.select', quiet=True, map=landscape_polygons, column='cat', flags='c')] list_cat.sort() # Declare a empty dictionnary which will contains the north, south, east, west values for each landscape unit landscapeunit_bbox={} # Declare a empty list which will contain the path of the configation files created listpath=[] # Declare a empty string variable which will contains the core part of the r.li configuration file maskedoverlayarea_1="" # Duplicate 'listoflandcoverraster' in a new variable called 'tmp_list' tmp_list=list(listoflandcoverraster) # Set the current landcover raster as the first of the list base_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time # Loop trough the landscape units for cat in list_cat: # Extract the current landscape unit polygon as temporary vector tmp_vect="tmp_"+base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat) gscript.run_command('v.extract', overwrite=True, quiet=True, input=landscape_polygons, cats=cat, output=tmp_vect) # Set region to match the extent of the current 
landscape polygon, with resolution and alignement matching the landcover raster gscript.run_command('g.region', vector=tmp_vect, align=base_landcover_raster) # Rasterize the landscape unit polygon landscapeunit_rast=tmp_vect[4:] gscript.run_command('v.to.rast', overwrite=True, quiet=True, input=tmp_vect, output=landscapeunit_rast, use='cat', memory='3000') # Remove temporary vector gscript.run_command('g.remove', quiet=True, flags="f", type='vector', name=tmp_vect) # Set the region to match the raster landscape unit extent and save the region info in a dictionary region_info=gscript.parse_command('g.region', raster=landscapeunit_rast, flags='g') n=str(round(float(region_info['n']),5)) #the config file need 5 decimal for north and south s=str(round(float(region_info['s']),5)) e=str(round(float(region_info['e']),6)) #the config file need 6 decimal for east and west w=str(round(float(region_info['w']),6)) # Save the coordinates of the bbox in the dictionary (n,s,e,w) landscapeunit_bbox[cat]=n+"|"+s+"|"+e+"|"+w # Add the line to the maskedoverlayarea_1 variable maskedoverlayarea_1+="MASKEDOVERLAYAREA "+landscapeunit_rast+"|"+landscapeunit_bbox[cat]+"\n" # Compile the content of the r.li configuration file config_file_content="SAMPLINGFRAME 0|0|1|1\n" config_file_content+=maskedoverlayarea_1 config_file_content+="RASTERMAP "+base_landcover_raster+"\n" config_file_content+="VECTORMAP "+landscape_polygons+"\n" # Create a new file and save the content configfilename=base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0] path=os.path.join(rli_dir,configfilename) listpath.append(path) f=open(path, 'w') f.write(config_file_content) f.close() # Continue creation of r.li configuration file and landscape unit raster the rest of the landcover raster provided while len(tmp_list)>0: # Initialize 'maskedoverlayarea_2' variable as an empty string maskedoverlayarea_2="" # Set the current landcover raster as the first of the list 
current_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time if masklayerhardcopy: # If the user asked for hard copy of the landscape units mask layers # Copy all the landscape units masks for the current landcover raster p=Pool(ncores) #Create a pool of processes and launch them using 'map' function func=partial(copy_landscapeunitmasks,current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox) # Set fixed argument of the function maskedoverlayarea_2=p.map(func,list_cat) # Launch the processes for as many items in the list and get the ordered results using map function p.close() p.join() # Compile the content of the r.li configuration file config_file_content="SAMPLINGFRAME 0|0|1|1\n" config_file_content+="\n".join(maskedoverlayarea_2)+"\n" config_file_content+="RASTERMAP "+current_landcover_raster+"\n" config_file_content+="VECTORMAP "+landscape_polygons+"\n" else: # If the user not asked for hard copy # Compile the content of the r.li configuration file config_file_content="SAMPLINGFRAME 0|0|1|1\n" config_file_content+=maskedoverlayarea_1 # If user do not asked for hard copy, the mask layers are the same than for the first configuration file config_file_content+="RASTERMAP "+current_landcover_raster+"\n" # But the name of the RASTERMAP should be the one of the current landcover raster config_file_content+="VECTORMAP "+landscape_polygons+"\n" # Create a new file and save the content configfilename=current_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0] path=os.path.join(rli_dir,configfilename) listpath.append(path) f=open(path, 'w') f.write(config_file_content) f.close() # Return a list of path of configuration files creates if option actived if returnlistpath: return list_cat,listpath else: return list_cat ``` ### Function for creation of binary raster from a categorical raster (multiprocessed) ``` ###### Function creating a binary raster 
for each category of a base raster. ### The function run within the current region. If a category do not exists in the current region, no binary map will be produce # 'categorical_raster' wait for the name of the base raster to be used. It is the one from which one binary raster will be produced for each category value # 'prefix' wait for a string corresponding to the prefix of the name of the binary raster which will be produced # 'setnull' wait for a boolean value (True, False) according to the fact that the output binary should be 1/0 or 1/null # 'returnlistraster' wait for a boolean value (True, False) regarding to the fact that a list containing the name of binary raster is desired as return of the function # 'category_list' wait for a list of interger corresponding to specific category of the base raster to be used # 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization # Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial def create_binary_raster(categorical_raster,prefix="binary",setnull=False,returnlistraster=True,category_list=None,ncores=2): # Check if raster exists to avoid error in mutliprocessing try: mpset=categorical_raster.split("@")[1] except: mpset="" if categorical_raster not in gscript.list_strings(type='raster',mapset=mpset): sys.exit('Raster <%s> not found' %categorical_raster) # Check for number of cores doesnt exceed available nbcpu=multiprocessing.cpu_count() if ncores>=nbcpu: ncores=nbcpu-1 returnlist=[] #Declare empty list for return #gscript.run_command('g.region', raster=categorical_raster, quiet=True) #Set the region null='null()' if setnull else '0' #Set the value for r.mapcalc minclass=1 if setnull else 2 #Set the value to check if the binary raster is empty if category_list == None: #If no category_list provided category_list=[cl for cl in gscript.parse_command('r.category',map=categorical_raster,quiet=True)] for 
i,x in enumerate(category_list): #Make sure the format is UTF8 and not Unicode category_list[i]=x.encode('UTF8') category_list.sort(key=float) #Sort the raster categories in ascending. p=Pool(ncores) #Create a pool of processes and launch them using 'map' function func=partial(get_binary,categorical_raster,prefix,null,minclass) # Set the two fixed argument of the function returnlist=p.map(func,category_list) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function p.close() p.join() if returnlistraster: return returnlist #### Function that extract binary raster for a specified class (called in 'create_binary_raster' function) def get_binary(categorical_raster,prefix,null,minclass,cl): binary_class=prefix+"_"+cl gscript.run_command('r.mapcalc', expression=binary_class+'=if('+categorical_raster+'=='+str(cl)+',1,'+null+')',overwrite=True, quiet=True) if len(gscript.parse_command('r.category',map=binary_class,quiet=True))>=minclass: #Check if created binary is not empty return binary_class else: gscript.run_command('g.remove', quiet=True, flags="f", type='raster', name=binary_class) ``` ### Function for computation of spatial metrics at landscape level (multiprocessed) ``` ##### Function that compute different landscape metrics (spatial metrics) at landscape level. ### The metric computed are "dominance","pielou","renyi","richness","shannon","simpson". ### It is important to set the computation region before runing this script so that it match the extent of the 'raster' layer. # 'configfile' wait for the path (string) to the configuration file corresponding to the 'raster' layer. # 'raster' wait for the name (string) of the landcover map on which landscape metrics will be computed. # 'returnlistresult' wait for a boolean value (True/False) according to the fact that a list containing the path to the result files is desired. 
# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization.

# Import libraries for multiprocessing
import multiprocessing
from multiprocessing import Pool
from functools import partial

def compute_landscapelevel_metrics(configfile, raster, spatial_metric):
    # Worker: run one r.li landscape-level module ('r.li.<spatial_metric>')
    # and return the path of its output file under the r.li output folder.
    filename=raster.split("@")[0]+"_%s" %spatial_metric
    outputfile=os.path.join(os.path.split(configfile)[0],"output",filename)
    if spatial_metric=='renyi':
        # The alpha parameter was set to 2 as in https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy
        gscript.run_command('r.li.%s' %spatial_metric, overwrite=True, input=raster,config=configfile,alpha='2', output=filename)
    else:
        gscript.run_command('r.li.%s' %spatial_metric, overwrite=True, input=raster,config=configfile, output=filename)
    return outputfile

def get_landscapelevel_metrics(configfile, raster, returnlistresult=True, ncores=2):
    # Entry point: compute the six landscape-level diversity metrics in
    # parallel (one r.li process per metric) for 'raster' using 'configfile'.
    # Check if raster exists to avoid error in mutliprocessing
    try:
        mpset=raster.split("@")[1]
    except:
        # No "@mapset" suffix: search the current mapset.
        mpset=""
    if raster not in gscript.list_strings(type='raster',mapset=mpset):
        sys.exit('Raster <%s> not found' %raster)
    # Check if configfile exists to avoid error in mutliprocessing
    if not os.path.exists(configfile):
        sys.exit('Configuration file <%s> not found' %configfile)
    # List of metrics to be computed
    spatial_metric_list=["dominance","pielou","renyi","richness","shannon","simpson"]
    # Check for number of cores doesnt exceed available
    nbcpu=multiprocessing.cpu_count()
    if ncores>=nbcpu:
        ncores=nbcpu-1
    if ncores>len(spatial_metric_list):
        ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute
    #Declare empty list for return
    returnlist=[]
    # Create a new pool
    p=Pool(ncores)
    # Set the two fixed argument of the 'compute_landscapelevel_metrics' function
    func=partial(compute_landscapelevel_metrics,configfile, raster)
    # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function
    returnlist=p.map(func,spatial_metric_list)
    p.close()
    p.join()
    # Return list of paths to result files
    if returnlistresult:
        return returnlist
```

### Function for computation of spatial metrics at class level (multiprocessed)

```
##### Function that compute different landscape metrics (spatial metrics) at class level.
### The metric computed are "patch number (patchnum)","patch density (patchdensity)","mean patch size(mps)",
### "coefficient of variation of patch area (padcv)","range of patch area size (padrange)",
### "standard deviation of patch area (padsd)", "shape index (shape)", "edge density (edgedensity)".
### It is important to set the computation region before runing this script so that it match the extent of the 'raster' layer.
# 'configfile' wait for the path (string) to the configuration file corresponding to the 'raster' layer.
# 'raster' wait for the name (string) of the landcover map on which landscape metrics will be computed.
# 'returnlistresult' wait for a boolean value (True/False) according to the fact that a list containing the path to the result files is desired.
# 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization.
# Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial def compute_classlevel_metrics(configfile, raster, spatial_metric): filename=raster.split("@")[0]+"_%s" %spatial_metric gscript.run_command('r.li.%s' %spatial_metric, overwrite=True, input=raster,config=configfile,output=filename) outputfile=os.path.join(os.path.split(configfile)[0],"output",filename) return outputfile def get_classlevel_metrics(configfile, raster, returnlistresult=True, ncores=2): # Check if raster exists to avoid error in mutliprocessing try: mpset=raster.split("@")[1] except: mpset="" if raster not in [x.split("@")[0] for x in gscript.list_strings(type='raster',mapset=mpset)]: sys.exit('Raster <%s> not found' %raster) # Check if configfile exists to avoid error in mutliprocessing if not os.path.exists(configfile): sys.exit('Configuration file <%s> not found' %configfile) # List of metrics to be computed spatial_metric_list=["patchnum","patchdensity","mps","padcv","padrange","padsd","shape","edgedensity"] # Check for number of cores doesnt exceed available nbcpu=multiprocessing.cpu_count() if ncores>=nbcpu: ncores=nbcpu-1 if ncores>len(spatial_metric_list): ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute # Declare empty list for return returnlist=[] # Create a new pool p=Pool(ncores) # Set the two fixed argument of the 'compute_classlevel_metrics' function func=partial(compute_classlevel_metrics,configfile, raster) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function returnlist=p.map(func,spatial_metric_list) p.close() p.join() # Return list of paths to result files if returnlistresult: return returnlist ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** <center> <font size=5> <h1>User inputs</h1> </font> </center> ``` ## Define a empty 
dictionnary for saving user inputs user={} ## Enter the path to GRASSDATA folder user["gisdb"] = "/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga" ## Enter the name of the location (existing or for a new one) user["location"] = "SPIE_subset" ## Enter the EPSG code for this location user["locationepsg"] = "32630" ## Enter the name of the mapset to use for segmentation user["mapsetname"] = "test_rli" ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** # Compute spatial metrics for deriving land use in street blocs **Launch GRASS GIS working session** ``` ## Set the name of the mapset in which to work mapsetname=user["mapsetname"] ## Launch GRASS GIS working session in the mapset if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)): gsetup.init(os.environ['GISBASE'], user["gisdb"], user["location"], mapsetname) print "You are now working in mapset '"+mapsetname+"'" else: print "'"+mapsetname+"' mapset doesn't exists in "+user["gisdb"] ``` **Set the path to the r.li folder for configuration file and for results** ``` os.environ # Define path of the outputfile (in r.li folder) version=grass.version()['version'].split('.')[0] # Get the version of GRASS GIS if sys.platform=="win32": rli_config_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li") rli_output_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li","output") else: rli_config_dir=os.path.join(os.environ['HOME'],"GRASS"+version,"r.li") rli_output_dir=os.path.join(os.environ['HOME'],".grass"+version,"r.li","output") if not os.path.exists(rli_config_dir): os.makedirs(rli_config_dir) if not os.path.exists(rli_output_dir): os.makedirs(rli_output_dir) # Print print "GRASS GIS add-on's r.li configuration files will be saved under <%s>."%(rli_config_dir,) print "GRASS GIS add-on's r.li outputs will be saved under <%s>."%(rli_output_dir,) ``` 
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**

### Define the name of the base landcover map and landscape units polygons

```
# Set the name of the 'base' landcover map
baselandcoverraster="classif@test_rli"
# Set the name of the vector polygon layer containing the landscape units
landscape_polygons="streetblocks"
```

### Import shapefile containing street blocks polygons

```
# Set the path to the shapefile containing streetblocks polygons
pathtoshp="/media/tais/data/Dropbox/ULB/MAUPP/Landuse_mapping/Test_spatial_metrics_computation/Data/streetblocks_subset_grasscat.shp"
# Import shapefile
gscript.run_command('v.in.ogr', quiet=True, overwrite=True, input=pathtoshp, output=landscape_polygons)
```

### Create binary rasters from the base landcover map

```
# Save time for computing processin time
begintime=time.time()
# Create as many binary raster layer as categorical values existing in the base landcover map
gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region
pref=baselandcoverraster.split("@")[0]+"_cl" #Set the prefix
raster_list=[] # Initialize a empty list for results
raster_list=create_binary_raster(baselandcoverraster, prefix=pref,setnull=True,returnlistraster=True, category_list=None,ncores=15) #Extract binary raster
# Compute and print processing time
print_processing_time(begintime,"Extraction of binary rasters achieved in ")

# Insert the name of the base landcover map at first position in the list
raster_list.insert(0,baselandcoverraster)
# Display the raster to be used for landscape analysis
raster_list
```

## Create r.li configuration file for a list of landcover rasters

```
# Save time for computing processin time
begintime=time.time()
# Run creation of r.li configuration file and associated raster layers
list_cats,list_configfile=create_rli_configfile(raster_list,landscape_polygons,masklayerhardcopy=False,returnlistpath=True,ncores=20)
# Compute and print processing time
print_processing_time(begintime,"Creation of r.li configuration files achieved in ")

# Display the path to the configuration files created
list_configfile
```

**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**

## Compute spatial metrics at landscape level

```
# Initialize an empty list which will contains the resultfiles
resultfiles=[]

# Save time for computing processin time
begintime=time.time()
# Get the path to the configuration file for the base landcover raster
currentconfigfile=list_configfile[0]
# Get the name of the base landcover raster
currentraster=raster_list[0]
# Set the region to match the extent of the base raster
gscript.run_command('g.region', raster=currentraster, quiet=True)
# Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function
resultfiles.append(get_landscapelevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=15))
# Compute and print processing time
print_processing_time(begintime,"Computation of spatial metric achieved in ")

resultfiles
```

## Compute spatial metrics at class level

```
# Save time for computing processin time
begintime=time.time()
# Get a list with paths to the configuration file for class level metrics
classlevelconfigfiles=list_configfile[1:]
# Get a list with name of binary landcover raster for class level metrics
classlevelrasters=raster_list[1:]
for x,currentraster in enumerate(classlevelrasters[:]):
    # Get the path to the configuration file for the base landcover raster
    currentconfigfile=classlevelconfigfiles[x]
    # Set the region to match the extent of the base raster
    gscript.run_command('g.region', raster=currentraster, quiet=True)
    # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function
    resultfiles.append(get_classlevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=10))
# Compute and print processing time
print_processing_time(begintime,"Computation of spatial metric achieved in ")

resultfiles

# Flat the 'resultfiles' list which contains several lists
resultfiles=[item for sublist in resultfiles for item in sublist]
resultfiles
```

### Change the results files from r.li to get the correct 'cat' value for each landscape unit

```
import csv, shutil
# NOTE(review): izip is imported but never used below (Python 2 leftover) —
# plain indexing via enumerate is what actually maps rows to 'list_cats'.
from itertools import izip

# Rewrite each r.li result file: replace the first column by the true
# landscape-unit 'cat' value and add a header line named after the metric.
for f in resultfiles:
    f_in=open(f)
    f_tmp=open(f+'_tmp',"w")
    f_in_reader=csv.reader(f_in,delimiter='|')
    f_tmp_writer=csv.writer(f_tmp,delimiter='|')
    f_tmp_writer.writerow(['cat',"_".join(os.path.split(f)[-1].split("_")[1:])])
    for i,row in enumerate(f_in_reader):
        newrow=[]
        newrow.append(list_cats[i])
        newrow.append(row[1])
        f_tmp_writer.writerow(newrow)
    f_in.close()
    f_tmp.close()
    # Replace the original file by the rewritten temporary copy.
    os.remove(f)
    shutil.copy2(f+'_tmp',f)
    os.remove(f+'_tmp')
```

# Compute some special metrics

```
# Set the name of the nDSM layer
ndsm="ndsm"
# Set the name of the NDVI layer
ndvi="ndvi"
# Set the name of the NDWI layer
ndwi="ndwi"
# Set the prefix of SAR textures layer
SAR_prefix="SAR_w"
```

### Mean and standard deviation of SAR textures, NDVI, NDWI

```
# Set up a list with name of raster layer to be used
ancillarylayers=[]
ancillarylayers.append(ndvi)
ancillarylayers.append(ndwi)
[ancillarylayers.append(x) for x in gscript.list_strings("rast", pattern=SAR_prefix, flag='r')] #Append SAR textures
print "Layer to be used :\n\n"+'\n'.join(ancillarylayers)

# Set the path to the file for i.segment.stats results for metrics_ndvi_ndwi_sar
metrics_ndvi_ndwi_sar=os.path.join(rli_output_dir,"metrics_ndvi_ndwi_sar")

# Create a raster corresponding to the landscape units (for computing statistics using i.segment.stats)
gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region
raster_landscapeunits="temp_%s"%landscape_polygons.split("@")[0]
gscript.run_command('v.to.rast', overwrite=True, input=landscape_polygons, output=raster_landscapeunits, use='cat')

# Save time for computing processin time
begintime=time.time()
###### Compute shape metrics as well as mean and stddev of ancillary layers for each landscape unit
## Set number of cores to be used
ncores=len(ancillarylayers)
nbcpu=multiprocessing.cpu_count()
if ncores>=nbcpu:
    ncores=nbcpu-1
if ncores>len(ancillarylayers):
    ncores=len(ancillarylayers) #Adapt number of cores to number of metrics to compute
# Run i.segment.stats
gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits, raster_statistics='stddev,median', area_measures='area,perimeter,compact_circle,compact_square,fd', rasters=','.join(ancillarylayers), csvfile=metrics_ndvi_ndwi_sar, processes=ncores)
# Compute and print processing time
print_processing_time(begintime,"Metrics computed in ")

resultfiles.append(metrics_ndvi_ndwi_sar)
resultfiles
```

### Mean and standard deviation of building's height

#### Create raster with nDSM value of 'buildings' pixels

```
# Set pixel value of 'buildings' on the 'baselandcoverraster'
buildpixel=11
# Set the name of the new layer containing height of buildings
buildings_height='buildings_height'
# Set the path to the file for i.segment.stats results for metrics_ndvi_ndwi_sar
metrics_buildings_height=os.path.join(rli_output_dir,"metrics_buildings_height")
# Create temp fil which will contain intermediate results
TMP_sumheights=grass.tempfile()+'_sumheights.csv'
TMP_nbrbuildpixels=grass.tempfile()+'_nbrbuildpixels.csv'

# Save time for computing processin time
begintime=time.time()
# Create a raster layer with height of pixels classified as 'buildings'
gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region
formula="%s=if(%s==%s, %s, 0)"%(buildings_height,baselandcoverraster,buildpixel,ndsm)
gscript.mapcalc(formula, overwrite=True)
# Compute and print processing time
print_processing_time(begintime,"Creation of layer in ")

# Save time for computing processin time
begintime=time.time() # Compute sum of build pixels's height using i.segment.stats gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits, raster_statistics='sum', flags='s', rasters='buildings_height', csvfile=TMP_sumheights,processes=ncores) # Compute number of built pixels using i.segment.stats binary_builtup_raster="%s_cl_%s"%(baselandcoverraster.split("@")[0],buildpixel) gscript.run_command('g.copy', overwrite=True, raster='%s,tmp'%binary_builtup_raster) gscript.run_command('r.null', map='tmp', null=0) gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits, raster_statistics='sum', flags='s', rasters='tmp', csvfile=TMP_nbrbuildpixels,processes=ncores) # Compute and print processing time print_processing_time(begintime,"i.segment.stats run in ") # Save time for computing processin time begintime=time.time() # Improt library to be able to iterate on two files in the same loop from itertools import izip # Declare empty dictionnary tmp_dic={} for i, (line_from_file_1, line_from_file_2) in enumerate(izip(open(TMP_sumheights), open(TMP_nbrbuildpixels))): if i==0: continue f1_items=line_from_file_1.split("\n")[0].split("|") f2_items=line_from_file_2.split("\n")[0].split("|") key=f1_items[0] sumheight=f1_items[1] nbpixel=f2_items[1] try: mean_height=float(sumheight)/float(nbpixel) except ZeroDivisionError: mean_height=0 tmp_dic[key]=mean_height # Get the name of the first colum with open(TMP_sumheights) as f: column_a=f.next().split("\n")[0].split("|")[0] # Built the content of the file content=[] content.append((column_a,'mean_build_height')) for key in tmp_dic.keys(): content.append((key,tmp_dic[key])) # Create a new file fout=open(metrics_buildings_height,"w") writer=csv.writer(fout, delimiter='|') writer.writerows(content) fout.close() # Compute and print processing time print_processing_time(begintime,"Mean build pixels's height computed in ") # Remove temporary layers gscript.run_command('g.remove', flags='ef', 
type='raster', name='tmp') # Remove temporary files os.remove(TMP_sumheights) os.remove(TMP_nbrbuildpixels) resultfiles.append(metrics_buildings_height) resultfiles ``` ### Proportion of each individual class in the landcover map ``` TODO: remove temp_streetblocks raster layer ``` # Combine all .csv files together ``` ## Function which executes a left join of individual .csv files on the value of their first column. # The argument "fileList" expects a list of paths to the individual .csv files to be joined. # The argument "outfile" expects a string containing the path to the output file to create. # The argument "overwrite" expects a True/False value allowing or not to overwrite an existing outfile. # The argument "pattern" expects a string containing the filename pattern to use. Wildcards are possible (*.csv for all .csv files) import os,sys,csv import glob def leftjoin_csv(fileList,outfile,separator_in=";",separator_out=";",overwrite=False,pattern=None): # Stop execution if the output file exists and cannot be overwritten if os.path.isfile(outfile) and overwrite==False: print "File '"+str(outfile)+"' aleady exists and overwrite option is not enabled." else: if os.path.isfile(outfile) and overwrite==True: # If the output file exists and can be overwritten os.remove(outfile) print "File '"+str(outfile)+"' has been overwrited."
if len(fileList)<=1: #Check if there are at least 2 files in the list sys.exit("This function require at least two .csv files to be jointed together.") # Save all the value in a dictionnary with key corresponding to the first column headerdict={} outputdict={} for f in [open(f) for f in resultfiles]: fin=csv.reader(f, delimiter='|') for i,row in enumerate(f): row_items=row.split("\r")[0].split("\n")[0] key=row_items.split("|")[0] value=row_items.split("|")[1:] for v in value: if i==0: # If first line try: headerdict[key].append(v) except: headerdict[key]=[v,] else: try: outputdict[key].append(v) except: outputdict[key]=[v,] # Write the dictionnary with header in a the output csv file outputcsv=open(outfile,"w") for key in headerdict.keys(): outputcsv.write(key+separator_out) outputcsv.write(separator_out.join(headerdict[key])) outputcsv.write("\n") for key in outputdict.keys(): outputcsv.write(key+separator_out) outputcsv.write(separator_out.join(outputdict[key])) outputcsv.write("\n") outputcsv.close() # Create a .csvt file with type of each column csvt=open(outfile+"t","w") results=open(outfile,"r") header=results.next() typecolumn=[] typecolumn.append("Integer") for columns in header[1:]: typecolumn.append("Real") csvt.write(separator_out.join(typecolumn)) csvt.close() # Print what happend print str(len(fileList))+" individual .csv files were joint together." 
# return headerdict,outputdict # Join all result files together in a new .csv file outfile=os.path.join(rli_output_dir,"land_use_metrics.csv") leftjoin_csv(resultfiles, outfile, separator_in="|", separator_out=";", overwrite=True) ``` ### Display the .csv using pandas ``` import pandas as pd # Load the .csv file in a pandas dataframe df=pd.read_csv(outfile, sep=';',header=0) # Display the dataframe df ``` ### Move files to dedicated folder **Configuration files** ``` # Set the folder where to move the configuration files finalfolder='/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga/Results_spatial_metrics/rli_config' ## Create the folder if does not exists if not os.path.exists(finalfolder): os.makedirs(finalfolder) print "Folder '"+finalfolder+"' created" ## Copy the files to the final folder and remove them from the original folder for configfile in list_configfile: shutil.copy2(configfile,finalfolder) os.remove(configfile) ``` **Result files** ``` # Set the folder where to move the configuration files finalfolder='/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga/Results_spatial_metrics/rli_results' ## Create the folder if does not exists if not os.path.exists(finalfolder): os.makedirs(finalfolder) print "Folder '"+finalfolder+"' created" ## Copy the files to the final folder and remove them from the original folder for res_file in resultfiles: shutil.copy2(res_file,finalfolder) os.remove(res_file) # Copy the final csv file with all the results shutil.copy2(outfile,finalfolder) os.remove(outfile) shutil.copy2(outfile+'t',finalfolder) os.remove(outfile+'t') ``` ## Join the .csv file to the landscape unit polygon layer ``` # Import .csv as new table in GRASS csvfile=os.path.join(finalfolder,os.path.split(outfile)[-1]) gscript.run_command('db.in.ogr', overwrite=True, quiet=True, input=csvfile, output='spatial_metrics_table') # Join the vector layer with the new table gscript.run_command('v.db.join --quiet map=streetblocks@test_rli column=cat 
other_table=spatial_metrics_table other_column=cat_ ``` # Importing the NDVI layer ``` break ## Saving current time for processing time management begintime_ndvi=time.time() ## Import nDSM imagery print ("Importing NDVI raster imagery at " + time.ctime()) gscript.run_command('r.import', input="/media/tais/data/MAUPP/WorldView3_Ouagadougou/Orthorectified/mosaique_georef/NDVI/ndvi_georef_ordre2.TIF", output="ndvi", overwrite=True) # Mask null/nodata values gscript.run_command('r.null', map="ndvi") print_processing_time(begintime_ndvi, "imagery has been imported in ") ``` # Importing the nDSM layer ``` break ## Saving current time for processing time management begintime_ndsm=time.time() ## Import nDSM imagery print ("Importing nDSM raster imagery at " + time.ctime()) grass.run_command('r.import', input="/media/tais/data/MAUPP/WorldView3_Ouagadougou/Orthorectified/mosaique_georef/nDSM/nDSM_mosaik_georef_ordre2.tif", output="ndsm", overwrite=True) ## Define null value for specific value in nDSM raster. Adapt the value to your own data. # If there is no null value in your data, comment the next line grass.run_command('r.null', map="ndsm", setnull="-999") # Make histogram equalisation on grey color. 
grass.run_command('r.colors', flags='e', map='ndsm', color='grey') print_processing_time(begintime_ndsm, "nDSM imagery has been imported in ") ``` ### Masking the nDSM artifacts ``` break # Import vector with nDSM artifacts zones grass.run_command('v.in.ogr', overwrite=True, input="/media/tais/data/MAUPP/WorldView3_Ouagadougou/Masque_artifacts_nDSM/Ouaga_mask_artifacts_nDSM.shp", output="mask_artifacts_ndsm") ## Set computational region to match the default region grass.run_command('g.region', overwrite=True, raster="ndsm") # Rasterize the vector layer, with value "0" on the artifacts zones grass.run_command('v.to.rast', input='mask_artifacts_ndsm', output='mask_artifacts_ndsm', use='val', value='0', memory='5000') ## Set computational region to match the default region grass.run_command('g.region', overwrite=True, raster="ndsm") ## Create a new nDSM with artifacts filled with '0' value formula='tmp_artifact=nmin(ndsm,mask_artifacts_ndsm)' grass.mapcalc(formula, overwrite=True) ## Remove the artifact mask grass.run_command('g.remove', flags='f', type='raster', name="mask_artifacts_ndsm") ## Rename the new nDSM grass.run_command('g.rename', raster='tmp_artifact,ndsm', overwrite=True) ## Remove the intermediate nDSM layer grass.run_command('g.remove', flags='f', type='raster', name="tmp_artifact") ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** # Define input raster for computing statistics of segments ``` ## Display the name of rasters available in PERMANENT and CLASSIFICATION mapset print grass.read_command('g.list',type="raster", mapset="PERMANENT", flags='rp') print grass.read_command('g.list',type="raster", mapset=user["classificationA_mapsetname"], flags='rp') ## Define the list of raster layers for which statistics will be computed inputstats=[] inputstats.append("opt_blue") inputstats.append("opt_green") inputstats.append("opt_red") inputstats.append("opt_nir") 
inputstats.append("ndsm") inputstats.append("ndvi") print "Layer to be used to compute raster statistics of segments:\n"+'\n'.join(inputstats) ## Define the list of raster statistics to be computed for each raster layer rasterstats=[] rasterstats.append("min") rasterstats.append("max") rasterstats.append("range") rasterstats.append("mean") rasterstats.append("stddev") #rasterstats.append("coeff_var") # Seems that this statistic create null values rasterstats.append("median") rasterstats.append("first_quart") rasterstats.append("third_quart") rasterstats.append("perc_90") print "Raster statistics to be computed:\n"+'\n'.join(rasterstats) ## Define the list of area measures (segment's shape statistics) to be computed areameasures=[] areameasures.append("area") areameasures.append("perimeter") areameasures.append("compact_circle") areameasures.append("compact_square") areameasures.append("fd") print "Area measures to be computed:\n"+'\n'.join(areameasures) ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** <center> <font size=5> <h1>Compute objects' statistics</h1> </font> </center> ``` ## Saving current time for processing time management begintime_computeobjstat=time.time() ``` ## Define the folder where to save the results and create it if necessary In the next cell, please adapt the path to the directory where you want to save the .csv output of i.segment.uspo. ``` ## Folder in which save processing time output outputfolder="/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/Results/CLASSIF/stats_optical" ## Create the folder if does not exists if not os.path.exists(outputfolder): os.makedirs(outputfolder) print "Folder '"+outputfolder+"' created" ``` ### Copy data from other mapset to the current mapset Some data need to be copied from other mapsets into the current mapset. 
### Remove current mask ``` ## Check if there is a raster layer named "MASK" if not grass.list_strings("rast", pattern="MASK", mapset=mapsetname, flag='r'): print 'There is currently no MASK' else: ## Remove the current MASK layer grass.run_command('r.mask',flags='r') print 'The current MASK has been removed' ``` ***Copy segmentation raster*** ``` ## Copy segmentation raster layer from SEGMENTATION mapset to current mapset grass.run_command('g.copy', overwrite=True, raster="segmentation_raster@"+user["segmentation_mapsetname"]+",segments") ``` ***Copy morphological zone (raster)*** ``` ## Copy segmentation raster layer from SEGMENTATION mapset to current mapset grass.run_command('g.copy', overwrite=True, raster="zone_morpho@"+user["segmentation_mapsetname"]+",zone_morpho") ``` ***Copy morphological zone (vector)*** ``` ## Copy segmentation raster layer from SEGMENTATION mapset to current mapset grass.run_command('g.copy', overwrite=True, vector="zone_morpho@"+user["segmentation_mapsetname"]+",zone_morpho") ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** # Compute statistics of segments (Full AOI extend) ### Compute statistics of segment using i.segment.stats The process is make to compute statistics iteratively for each morphological zones, used here as tiles. This section uses the ['i.segment.stats' add-on](https://grass.osgeo.org/grass70/manuals/addons/i.segment.stats.html) to compute statistics for each object. 
``` ## Save name of the layer to be used as tiles tile_layer='zone_morpho'+'@'+mapsetname ## Save name of the segmentation layer to be used by i.segment.stats segment_layer='segments'+'@'+mapsetname ## Save name of the column containing area_km value area_column='area_km2' ## Save name of the column containing morphological type value type_column='type' ## Save the prefix to be used for the outputfiles of i.segment.stats prefix="Segstat" ## Save the list of polygons to be processed (save the 'cat' value) listofregion=list(grass.parse_command('v.db.select', map=tile_layer, columns='cat', flags='c'))[:] for count, cat in enumerate(listofregion): print str(count)+" cat:"+str(cat) ``` ``` ## Initialize a empty string for saving print outputs txtcontent="" ## Running i.segment.stats messagetoprint="Start computing statistics for segments to be classified, using i.segment.stats on "+time.ctime()+"\n" print (messagetoprint) txtcontent+=messagetoprint+"\n" begintime_isegmentstats=time.time() ## Compute total area to be processed for process progression information processed_area=0 nbrtile=len(listofregion) attributes=grass.parse_command('db.univar', flags='g', table=tile_layer.split("@")[0], column=area_column, driver='sqlite') total_area=float(attributes['sum']) messagetoprint=str(nbrtile)+" region(s) will be processed, covering an area of "+str(round(total_area,3))+" Sqkm."+"\n\n" print (messagetoprint) txtcontent+=messagetoprint ## Save time before looping begintime_isegmentstats=time.time() ## Start loop on morphological zones count=1 for cat in listofregion[:]: ## Save current time at loop' start. 
begintime_current_id=time.time() ## Create a computional region for the current polygon condition="cat="+cat outputname="tmp_"+cat grass.run_command('v.extract', overwrite=True, quiet=True, input=tile_layer, type='area', where=condition, output=outputname) grass.run_command('g.region', overwrite=True, vector=outputname, align=segment_layer) grass.run_command('r.mask', overwrite=True, raster=tile_layer, maskcats=cat) grass.run_command('g.remove', quiet=True, type="vector", name=outputname, flags="f") ## Save size of the current polygon and add it to the already processed area size=round(float(grass.read_command('v.db.select', map=tile_layer, columns=area_column, where=condition,flags="c")),2) ## Print messagetoprint="Computing segments's statistics for tile n°"+str(cat) messagetoprint+=" ("+str(count)+"/"+str(len(listofregion))+")" messagetoprint+=" corresponding to "+str(size)+" km2" print (messagetoprint) txtcontent+=messagetoprint+"\n" ## Define the csv output file name, according to the optimization function selected outputcsv=os.path.join(outputfolder,prefix+"_"+str(cat)+".csv") ## Compute statistics of objets using i.segment.stats only with .csv output (no vectormap output). grass.run_command('i.segment.stats', overwrite=True, map=segment_layer, rasters=','.join(inputstats), raster_statistics=','.join(rasterstats), area_measures=','.join(areameasures), csvfile=outputcsv, processes='20') ## Add the size of the zone to the already processed area processed_area+=size ## Print messagetoprint=print_processing_time(begintime_current_id, "i.segment.stats finishes to process th current tile in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" remainingtile=nbrtile-count if remainingtile>0: messagetoprint=str(round((processed_area/total_area)*100,2))+" percent of the total area processed. 
" messagetoprint+="Still "+str(remainingtile)+" zone(s) to process."+"\n" print (messagetoprint) txtcontent+=messagetoprint+"\n" else: messagetoprint="\n" print (messagetoprint) txtcontent+=messagetoprint ## Adapt the count count+=1 ## Remove current mask grass.run_command('r.mask', flags='r') ## Compute processing time and print it messagetoprint=print_processing_time(begintime_isegmentstats, "Statitics computed in ") print (messagetoprint) txtcontent+=messagetoprint #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write f = open(os.path.join(outputfolder,mapsetname+"_processingtime_isegmentstats.txt"), 'w') f.write(mapsetname+" processing time information for i.segment.stats"+"\n\n") f.write(txtcontent) f.close() ## print print_processing_time(begintime_computeobjstat,"Object statistics computed in ") ``` ## Concatenate individuals .csv files and replace unwanted values BE CAREFUL! Before runing the following cells, please check your data to be sure that it makes sens to replace the 'nan', 'null', or 'inf' values with "0" ``` ## Define the outputfile for .csv containing statistics for all segments outputfile=os.path.join(outputfolder,"all_segments_stats.csv") print outputfile # Create a dictionary with 'key' to be replaced by 'values' findreplacedict={} findreplacedict['nan']="0" findreplacedict['null']="0" findreplacedict['inf']="0" # Define pattern of file to concatenate pat=prefix+"_*.csv" sep="|" ## Initialize a empty string for saving print outputs txtcontent="" ## Saving current time for processing time management begintime_concat=time.time() ## Print messagetoprint="Start concatenate individual .csv files and replacing unwanted values." 
print (messagetoprint) txtcontent+=messagetoprint+"\n" # Concatenate and replace unwanted values messagetoprint=concat_findreplace(outputfolder,pat,sep,findreplacedict,outputfile) print (messagetoprint) txtcontent+=messagetoprint+"\n" ## Compute processing time and print it messagetoprint=print_processing_time(begintime_concat, "Process achieved in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write filepath=os.path.join(outputfolder,mapsetname+"_processingtime_concatreplace.txt") f = open(filepath, 'w') f.write(mapsetname+" processing time information for concatenation of individual .csv files and replacing of unwanted values."+"\n\n") f.write(txtcontent) f.close() ``` # Create new database in postgresql ``` # User for postgresql connexion dbuser="tais" # Password of user dbpassword="tais" # Host of database host="localhost" # Name of the new database dbname="ouaga_fullaoi_localsegment" # Set name of schema for objects statistics stat_schema="statistics" # Set name of table with statistics of segments - FOR OPTICAL object_stats_table="object_stats_optical" break from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT # Connect to postgres database db=None db=pg.connect(dbname='postgres', user=dbuser, password=dbpassword, host=host) # Allow to create a new database db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) # Execute the CREATE DATABASE query cur=db.cursor() #cur.execute('DROP DATABASE IF EXISTS ' + dbname) #Comment this to avoid deleting existing DB cur.execute('CREATE DATABASE ' + dbname) cur.close() db.close() ``` ### Create PostGIS Extension in the database ``` break # Connect to the database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() # Execute the query cur.execute('CREATE EXTENSION IF NOT EXISTS postgis') # Make the changes to the 
database persistent db.commit() # Close connection with database cur.close() db.close() ``` <center> <font size=4> <h2>Import statistics of segments in a Postgresql database</h2> </font> </center> ## Create new schema in the postgresql database ``` schema=stat_schema from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT # Connect to postgres database db=None db=pg.connect(dbname=dbname, user='tais', password='tais', host='localhost') # Allow to create a new database db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) # Execute the CREATE DATABASE query cur=db.cursor() #cur.execute('DROP SCHEMA IF EXISTS '+schema+' CASCADE') #Comment this to avoid deleting existing DB try: cur.execute('CREATE SCHEMA '+schema) except Exception as e: print ("Exception occured : "+str(e)) cur.close() db.close() ``` ## Create a new table ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() # Drop table if exists: cur.execute("DROP TABLE IF EXISTS "+schema+"."+object_stats_table) # Make the changes to the database persistent db.commit() import csv # Create a empty list for saving of column name column_name=[] # Create a reader for the first csv file in the stack of csv to be imported pathtofile=os.path.join(outputfolder, outputfile) readercsvSubset=open(pathtofile) readercsv=csv.reader(readercsvSubset, delimiter='|') headerline=readercsv.next() print "Create a new table '"+schema+"."+object_stats_table+"' with header corresponding to the first row of file '"+pathtofile+"'" ## Build a query for creation of a new table with auto-incremental key-value (thus avoiding potential duplicates of 'cat' value) # All column data-types are set to 'text' in order to be able to import some 'nan', 'inf' or 'null' values present in statistics files # This table will allow to import all individual csv files in a single Postgres table, which will be cleaned after query="CREATE 
TABLE "+schema+"."+object_stats_table+" (" query+="key_value serial PRIMARY KEY" query+=", "+str(headerline[0])+" text" column_name.append(str(headerline[0])) for column in headerline[1:]: if column[0] in ('1','2','3','4','5','6','7','8','9','0'): query+="," query+=" "+"W"+str(column)+" double precision" column_name.append("W"+str(column)) else: query+="," query+=" "+str(column)+" double precision" column_name.append(str(column)) query+=")" # Execute the CREATE TABLE query cur.execute(query) # Make the changes to the database persistent db.commit() # Close cursor and communication with the database cur.close() db.close() ``` ## Copy objects statistics from csv to Postgresql database ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() ## Initialize a empty string for saving print outputs txtcontent="" ## Saving current time for processing time management begintime_copy=time.time() ## Print messagetoprint="Start copy of segments' statistics in the postgresql table '"+schema+"."+object_stats_table+"'" print (messagetoprint) txtcontent+=messagetoprint+"\n" # Create query for copy data from csv, avoiding the header, and updating only the column which are in the csv (to allow auto-incremental key value to wokr) query="COPY "+schema+"."+object_stats_table+"("+', '.join(column_name)+") " query+=" FROM '"+str(pathtofile)+"' HEADER DELIMITER '|' CSV;" # Execute the COPY FROM CSV query cur.execute(query) # Make the changes to the database persistent db.commit() ## Compute processing time and print it messagetoprint=print_processing_time(begintime_copy, "Process achieved in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write filepath=os.path.join(outputfolder,mapsetname+"_processingtime_PostGimport.txt") f = open(filepath, 'w') 
f.write(mapsetname+" processing time information for importation of segments' statistics in the PostGreSQL Database."+"\n\n") f.write(txtcontent) f.close() # Close cursor and communication with the database cur.close() db.close() ``` # Drop duplicate values of CAT Here, we will find duplicates. Indeed, as statistics are computed for each tile (morphological area) and computational region aligned to the pixels raster, some objets could appear in two different tile resulting on duplicates on "CAT" column. We firs select the "CAT" of duplicated objets and then puting them in a list. Then, for each duplicated "CAT", we select the key-value (primary key) of the smallest object (area_min). The row corresponding to those key-values are then remoed using the "DELETE FROM" query. ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() ## Initialize a empty string for saving print outputs txtcontent="" ## Saving current time for processing time management begintime_removeduplic=time.time() ## Print messagetoprint="Start removing duplicates in the postgresql table '"+schema+"."+object_stats_table+"'" print (messagetoprint) txtcontent+=messagetoprint+"\n" # Find duplicated 'CAT' find_duplicated_cat() # Remove duplicated count_pass=1 count_removedduplic=0 while len(cattodrop)>0: messagetoprint="Removing duplicates - Pass "+str(count_pass) print (messagetoprint) txtcontent+=messagetoprint+"\n" find_duplicated_key() remove_duplicated_key() messagetoprint=str(len(keytodrop))+" duplicates removed." print (messagetoprint) txtcontent+=messagetoprint+"\n" count_removedduplic+=len(keytodrop) # Find again duplicated 'CAT' find_duplicated_cat() count_pass+=1 messagetoprint="A total of "+str(count_removedduplic)+" duplicates were removed." 
print (messagetoprint) txtcontent+=messagetoprint+"\n" ## Compute processing time and print it messagetoprint=print_processing_time(begintime_removeduplic, "Process achieved in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write filepath=os.path.join(outputfolder,mapsetname+"_processingtime_RemoveDuplic.txt") f = open(filepath, 'w') f.write(mapsetname+" processing time information for removing duplicated objects."+"\n\n") f.write(txtcontent) f.close() # Vacuum the current Postgresql database vacuum(db) ``` # Change the primary key from 'key_value' to 'cat' ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() # Build a query to drop the current constraint on primary key query="ALTER TABLE "+schema+"."+object_stats_table+" \ DROP CONSTRAINT "+object_stats_table+"_pkey" # Execute the query cur.execute(query) # Make the changes to the database persistent db.commit() # Build a query to change the datatype of 'cat' to 'integer' query="ALTER TABLE "+schema+"."+object_stats_table+" \ ALTER COLUMN cat TYPE integer USING cat::integer" # Execute the query cur.execute(query) # Make the changes to the database persistent db.commit() # Build a query to add primary key on 'cat' query="ALTER TABLE "+schema+"."+object_stats_table+" \ ADD PRIMARY KEY (cat)" # Execute the query cur.execute(query) # Make the changes to the database persistent db.commit() # Build a query to drop column 'key_value' query="ALTER TABLE "+schema+"."+object_stats_table+" \ DROP COLUMN key_value" # Execute the query cur.execute(query) # Make the changes to the database persistent db.commit() # Vacuum the current Postgresql database vacuum(db) # Close cursor and communication with the database cur.close() db.close() ``` ### Show first rows of statistics ``` # 
Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Number of line to show (please limit to 100 for saving computing time) nbrow=15 # Query query="SELECT * FROM "+schema+"."+object_stats_table+" \ ORDER BY cat \ ASC LIMIT "+str(nbrow) # Execute query through panda df=pd.read_sql(query, db) # Show dataframe df.head(15) ``` <left> <font size=4> <b> End of classification part </b> </font> </left> ``` print("The script ends at "+ time.ctime()) print_processing_time(begintime_segmentation_full, "Entire process has been achieved in ") ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
github_jupyter
### Imports ``` import torch from tqdm import tqdm import numpy as np from rdkit import Chem from rdkit import RDLogger RDLogger.DisableLog('rdApp.*') from rdkit.Chem import Draw from matplotlib import pyplot as plt from sklearn.metrics import roc_auc_score as ras from sklearn.metrics import mean_squared_error ``` ### Auglichem imports ``` from auglichem.molecule import Compose, RandomAtomMask, RandomBondDelete, MotifRemoval from auglichem.molecule.data import MoleculeDatasetWrapper from auglichem.molecule.models import GCN, AttentiveFP, GINE, DeepGCN ``` ### Set up dataset ``` # Create transformation transform = Compose([ RandomAtomMask([0.1, 0.3]), RandomBondDelete([0.1, 0.3]), MotifRemoval() ]) transform = RandomAtomMask(0.1) # Initialize dataset object dataset = MoleculeDatasetWrapper("ClinTox", data_path="./data_download", transform=transform, batch_size=128) # Get train/valid/test splits as loaders train_loader, valid_loader, test_loader = dataset.get_data_loaders("all") ``` ### Initialize model with task from data ``` # Get model num_outputs = len(dataset.labels.keys()) model = AttentiveFP(task=dataset.task, output_dim=num_outputs) # Uncomment the following line to use GPU #model.cuda() ``` ### Initialize traning loop ``` if(dataset.task == 'classification'): criterion = torch.nn.CrossEntropyLoss() elif(dataset.task == 'regression'): criterion = torch.nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5) ``` ### Train the model ``` for epoch in range(2): for bn, data in tqdm(enumerate(train_loader)): optimizer.zero_grad() loss = 0. 
# Get prediction for all data _, pred = model(data) # To use GPU, data must be cast to cuda #_, pred = model(data.cuda()) for idx, t in enumerate(train_loader.dataset.target): # Get indices where target has a value good_idx = np.where(data.y[:,idx]!=-999999999) # When the data is placed on GPU, target must come back to CPU #good_idx = np.where(data.y.cpu()[:,idx]!=-999999999) # Prediction is handled differently for classification and regression if(train_loader.dataset.task == 'classification'): current_preds = pred[:,2*(idx):2*(idx+1)][good_idx] current_labels = data.y[:,idx][good_idx] elif(train_loader.dataset.task == 'regression'): current_preds = pred[:,idx][good_idx] current_labels = data.y[:,idx][good_idx] loss += criterion(current_preds, current_labels) loss.backward() optimizer.step() ``` ### Test the model ``` def evaluate(model, test_loader, validation=False): set_str = "VALIDATION" if validation else "TEST" with torch.no_grad(): # All targets we're evaluating target_list = test_loader.dataset.target # Dictionaries to keep track of predictions and labels for all targets all_preds = {target: [] for target in target_list} all_labels = {target: [] for target in target_list} model.eval() for data in test_loader: # Get prediction for all data _, pred = model(data) # To use GPU, data must be cast to cuda #_, pred = model(data.cuda()) for idx, target in enumerate(target_list): # Get indices where target has a value good_idx = np.where(data.y[:,idx]!=-999999999) # When the data is placed on GPU, target must come back to CPU #good_idx = np.where(data.y.cpu()[:,idx]!=-999999999) # Prediction is handled differently for classification and regression if(train_loader.dataset.task == 'classification'): current_preds = pred[:,2*(idx):2*(idx+1)][good_idx][:,1] current_labels = data.y[:,idx][good_idx] elif(train_loader.dataset.task == 'regression'): current_preds = pred[:,idx][good_idx] current_labels = data.y[:,idx][good_idx] # Save predictions and targets 
all_preds[target].extend(list(current_preds.detach().cpu().numpy())) all_labels[target].extend(list(current_labels.detach().cpu().numpy())) scores = {target: None for target in target_list} for target in target_list: if(test_loader.dataset.task == 'classification'): scores[target] = ras(all_labels[target], all_preds[target]) print("{0} {1} ROC: {2:.5f}".format(target, set_str, scores[target])) elif(test_loader.dataset.task == 'regression'): scores[target] = mean_squared_error(all_labels[target], all_preds[target], squared=False) print("{0} {1} RMSE: {2:.5f}".format(target, set_str, scores[target])) evaluate(model, valid_loader, validation=True) evaluate(model, test_loader) ``` ### Model saving/loading example ``` # Save model torch.save(model.state_dict(), "./saved_models/example_gcn") # Instantiate new model and evaluate model = AttentiveFP(task=dataset.task, output_dim=num_outputs) evaluate(model, test_loader) # Load saved model and evaluate model.load_state_dict(torch.load("./saved_models/example_gcn")) evaluate(model, test_loader) ```
github_jupyter
# SLE-GAN ![Sample of images generated by SLE-GAN](https://github.com/sony/nnabla-examples/raw/master/GANs/slegan/example.png) This example demonstrates [SLE-GAN](https://arxiv.org/abs/2101.04775), which learns to generate images from small datasets. # Preparation Let's start by installing nnabla and accessing [nnabla-examples repository](https://github.com/sony/nnabla-examples). If you're running on Colab, make sure that your Runtime setting is set as GPU, which can be set up from the top menu (Runtime → change runtime type), and make sure to click **Connect** on the top right-hand side of the screen before you start. ``` !pip install nnabla-ext-cuda100 !git clone https://github.com/sony/nnabla-examples.git %cd nnabla-examples/GANs/slegan ``` Now, select a type of object that you want to generate from the drop-down menu. We have many object categories available ranging from animals to landmark architecutres. **Make sure to run the cell after making a choice from the drop-down menu.** ``` dataset = "Grumpy_Cat" #@param ["Grumpy_Cat", "Bridge_of_Sighs", "Medici_Fountain", "Obama", "Panda", "Temple_of_Heaven", "Wuzhen", "Dog"] ``` Depending on your choice above, the following cell will download the pre-trained weight parameters. 
``` if dataset == "Grumpy_Cat": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/cat/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/cat/GenEMA_iter100000.h5" elif dataset == "Bridge_of_Sighs": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/bridge/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/bridge/GenEMA_iter100000.h5" elif dataset == "Medici_Fountain": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/fountain/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/fountain/GenEMA_iter100000.h5" elif dataset == "Obama": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/obama/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/obama/GenEMA_iter100000.h5" elif dataset == "Panda": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/panda/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/panda/GenEMA_iter100000.h5" elif dataset == "Temple_of_Heaven": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/temple/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/temple/GenEMA_iter100000.h5" elif dataset == "Wuzhen": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/wuzhen/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/wuzhen/GenEMA_iter100000.h5" elif dataset == "Dog": url1 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/dog/Gen_iter100000.h5" url2 = "https://nnabla.org/pretrained-models/nnabla-examples/GANs/slegan/dog/GenEMA_iter100000.h5" !wget $url1 $url2 !mkdir $dataset !mv *.h5 $dataset ``` # Generation Now, let's generate the images by simply running the following cell! 
You can change the number of images to generate and the image size to generate by modifying the numbers after `--batch-size` and `--image-size`. ``` !python generate.py --model-load-path $dataset --batch-size 8 --image-size 256 ``` Images have been generated now. Let's see how they look! ``` from IPython.display import Image,display fname = './result/tmp/Image-Tile/000000.png' display(Image(fname)) ``` Also give it a try with other object categories from the drop-down menu above! Hope you have fun!
github_jupyter
# Automatic peak finding and calibration tools in Becquerel `Becquerel` contains tools for obtaining a rough first calibration for an uncalibrated `Spectrum`. First, some imports: ``` %matplotlib inline import os import matplotlib.pyplot as plt import numpy as np import becquerel as bq ``` Also some function definitions: ``` def plot_spec(spectrum, xmode='channel'): if xmode == 'channel': facecolor = 'green' else: facecolor = 'blue' plt.figure() spectrum.fill_between(xmode=xmode, facecolor=facecolor, alpha=0.4, ax=plt.gca()) spectrum.plot('k-', lw=0.7, xmode=xmode, ax=plt.gca()) if xmode == 'channel': plt.xlim(0, spectrum.bin_edges_raw.max()) plt.title('Uncalibrated spectrum') else: plt.xlim(0, spectrum.bin_centers_kev[-1]) plt.title('Calibrated spectrum') plt.yscale('log') plt.ylim(2e-1) plt.tight_layout() def plot_calibrator(cal): cal.peakfinder.spectrum.apply_calibration(cal.cal) print('fit gain:', cal.gain, 'keV/channel') print('fit channels:', cal.fit_channels) plt.figure() plt.title('Peaks used in fit') cal.plot() plt.tight_layout() plot_spec(cal.peakfinder.spectrum, xmode='channel') for x, erg in zip(cal.fit_channels, cal.fit_energies): chan = cal.peakfinder.spectrum.find_bin_index(x, use_kev=False) y = cal.peakfinder.spectrum.counts_vals[chan-10:chan+10].max() * 1.5 plt.plot([x, x], [1e-1, y], 'r-', alpha=0.5) plt.text(x, y, '{:.1f} keV'.format(erg)) plot_spec(cal.peakfinder.spectrum, xmode='energy') for erg in cal.fit_energies: x = int(erg / cal.gain) chan = cal.peakfinder.spectrum.find_bin_index(x, use_kev=False) y = cal.peakfinder.spectrum.counts_vals[chan-15:chan+15].max() * 1.5 plt.plot([erg, erg], [1e-1, y], 'r-', alpha=0.5) plt.text(erg, y, '{:.1f} keV'.format(erg)) ``` ## `PeakFilter` classes Instances of `PeakFilter` classes generate energy-dependent kernels that can be convolved with a spectrum to extract lines from the background continuum. 
To instantiate a kernel, the FWHM in channels at a specific channel is required, and the kernel scales the FWHM so that it is proportional to the square root of the channel (approximating the energy resolution of a detector). Here is what a `GaussianDerivPeakFilter` looks like: ``` # demonstrate energy-dependent kernels channels = np.arange(1000) for kernel in [bq.GaussianPeakFilter(1000, 50, 5)]: plt.figure() plt.title('{} evaluated at different channels'.format(type(kernel).__name__)) ind = np.arange(1000) plt.plot([-50, 50], [0, 0], 'k-') for chan in range(100, 900, 100): kern = kernel.kernel(chan, np.arange(1001)) plt.plot(ind - chan, kern, '-', lw=1.5, label='Channel {}'.format(chan)) plt.xlim(-50, 50) plt.xlabel('offset from channel') plt.ylabel('kernel value') plt.legend() plt.tight_layout() ``` We will use the `GaussiaPeakKernel` from now on. A kernel can create a matrix that can be multiplied with a spectrum to perform the convolution. Here is what such a matrix could look like: ``` # display the kernel matrix kernel = bq.GaussianPeakFilter(1000, 50, 5) plt.figure() plt.title('Matrix of GaussianPeakFilter evaluated across entire spectrum') kernel.plot_matrix(np.arange(1000)) plt.tight_layout() ``` ## `PeakFinder` and `AutoCalibrator` classes The `PeakFinder` class allows one to automatically select peaks that a `PeakFilter` filters out of the spectrum. The `AutoCalibrator` class takes the peaks found by a `PeakFinder` and finds the most likely energies associated with those peaks. It is easiest to explain these classes using examples. 
## Example 1: Calibrating a scintillator spectrum First we read in a raw spectrum from file (this is a simulated background spectrum for a scintillator): ``` counts = [] filename = os.path.join(os.path.dirname(bq.__file__), '../tests/samples/sim_spec.spe') spec = bq.Spectrum.from_file(filename) spec = spec.combine_bins(4) spec.bin_edges_raw *= 4 plot_spec(spec) plt.figure() plt.plot(spec.bin_centers_raw, spec.counts_vals) plt.yscale('log') plt.show() ``` To filter this spectrum we will use a kernel with a width of 50 channels at 500 channels, to match the strong line in the center (most likely the K-40 line at 1460 keV): ``` kernel = bq.GaussianPeakFilter(500, 50, fwhm_at_0=10) ``` ### 1.1 `PeakFinder` class The `PeakFinder` class uses a `PeakFilter` to filter and calibrate the spectrum. Under the hood, the kernel estimates the SNR of each peak by separating peaks from the background continuum. We can introspect this process using the `PeakFinder` instance: ``` # show how the kernel estimates the peaks+background and the background finder = bq.PeakFinder(spec, kernel) plt.figure() plt.plot(spec.counts_vals.clip(1e-1), label='Raw spectrum') plt.plot(finder._peak_plus_bkg.clip(1e-1), label='Peaks+Continuum') plt.plot(finder._bkg.clip(1e-1), label='Continuum') plt.plot(finder._signal.clip(1e-1), label='Peaks') plt.yscale('log') plt.xlim(0, len(spec)) plt.ylim(3e-1) plt.xlabel('Channels') plt.ylabel('Counts') plt.legend() plt.tight_layout() ``` The kernel applied directly to the spectral count data produces the estimated signal-to-noise (SNR) of each peak. ``` # plot signal to noise plt.figure() plt.title('Kernel applied to spectrum') finder.plot() plt.tight_layout() ``` ### 1.2 Using `find_peak` to find a specific peak Use the method `find_peak` to find a specific peak in the spectrum. Let's try to locate the index of the tallest peak, right in the middle of the spectrum: ``` peak_chan = finder.find_peak(500, min_snr=3.) 
print(peak_chan) plt.figure() plt.title('find_peak') finder.plot() plt.xlim(0,1000) plt.tight_layout() finder.centroids ``` Subsequent calls to `find_peak` will store the any new results: ``` peak_chan = finder.find_peak(900, min_snr=3.) print(peak_chan) plt.figure() plt.title('find_peak') finder.plot() plt.tight_layout() ``` #### 1.2 Use `reset` to remove all candidate peaks and calibration data The list of candidate peaks will persist in the `PeakFinder` object, as will any calibration information (will be covered later). Resetting the current object yields: ``` finder.reset() plt.figure() plt.title('after reset') finder.plot() plt.tight_layout() ``` ### 1.2 Using `find_peaks` to find all peaks above an SNR threshold Instead of repeatedly calling `find_peak`, one can build up a set of peak candidates using `find_peaks`. The following locates all peaks above channel 50 and an SNR of 2: ``` finder.find_peaks(min_snr=1, xmin=50) print(finder.centroids) print(finder.snrs) plt.figure() plt.title('find_peaks') finder.plot() plt.tight_layout() ``` ### 1.4 The `AutoCalibrator.fit` method The main machinery of auto-calibration is the `fit` method, which matches peak candidates (e.g., the outputs of `find_peaks`) with specific line energies and keeps the best match: ``` cal = bq.AutoCalibrator(finder) cal.fit( [351.93, 609.32, 1460.82, 2614.3], optional=[295.22, 768.36, 1120.294, 1238.122, 1764.49], gain_range=[2.5e-2, 4e2], de_max=200., ) plot_calibrator(cal) ``` ### 1.5 `AutoCalibrator.fit` with only one peak A special case of the calibrator is when only one peak has been found and only one energy is given. Use this with caution since there is none of the cross-validation that comes with multiple lines. ``` cal.peakfinder.reset() cal.peakfinder.fwhm_tol=(0.5, 1.2) cal.peakfinder.find_peak(500, min_snr=3.) cal.fit([1460.82], gain_range=[2.5e-1, 4e1], de_max=50.) plot_calibrator(cal) # looks like there may be an off-by-one or bin center vs edge issue in plotting... 
``` ## Example 2: Calibrating an HPGe spectrum Let's perform the same calibration steps using an HPGe spectrum. This spectrum will have many more lines to fit. ``` # read raw HPGe data filename = os.path.join(os.path.dirname(bq.__file__), '../tests/samples/Mendocino_07-10-13_Acq-10-10-13.Spe') spec = bq.Spectrum.from_file(filename) plot_spec(spec) ``` We will again use a `GaussianDerivKernel`, but this one must be much narrower to match the resolution. Not surprisingly, many of the peaks in the spectrum have higher SNR values: ``` # apply the kernel to the data to get SNR kernel = bq.GaussianPeakFilter(3700, 10, fwhm_at_0=5) finder = bq.PeakFinder(spec, kernel) cal = bq.AutoCalibrator(finder) plt.figure() plt.title('Kernel applied to spectrum') cal.peakfinder.plot() plt.tight_layout() # find significant peaks cal.peakfinder.find_peaks(min_snr=15, xmin=400) print(cal.peakfinder.centroids) print(cal.peakfinder.snrs) plt.figure() plt.title('find_peaks') cal.peakfinder.plot() plt.tight_layout() # perform calibration cal.fit( [295.22, 351.93, 511.0, 609.32, 1460.82, 2614.3], optional=[583.187, 911.20, 1120.294, 1238.122, 1377.67, 1764.49, 2204.06], gain_range=[0.35, 0.40], de_max=5., ) plot_calibrator(cal) ``` ## Example 3: An unusual NaI spectrum This example shows a real spectrum from a NaI detector with very poor energy resolution and where the dynamic range has cut off the higher energies. Can we still calibrate it? 
``` counts = [] filename = os.path.join(os.path.dirname(bq.__file__), '../tests/samples/nai_detector.spe') spec = bq.Spectrum.from_file(filename) plot_spec(spec) kernel = bq.GaussianPeakFilter(700, 50, 10) finder = bq.PeakFinder(spec, kernel) cal = bq.AutoCalibrator(finder) # find significant peaks cal.peakfinder.find_peaks(min_snr=3, xmin=100) print(cal.peakfinder.centroids) print(cal.peakfinder.snrs) plt.figure() plt.title('find_peaks') cal.peakfinder.plot() plt.tight_layout() # perform calibration cal.fit( [609.32, 1460.82], optional=[], gain_range=[0.1, 5.], de_max=50., ) plot_calibrator(cal) ``` That did not work right, the calibrator matched with the wrong lines. To fix this, we could either increase `xmin` to exclude the lower energy lines, increase `min_snr` to exclude the lower significance lines, or add optional energies. Let's try the same fit but with a longer list of prominent background lines: ``` # perform calibration again, but with more optional energies cal.fit( [609.32, 1460.82], optional=[238.63, 338.32, 351.93, 911.20, 1120.294, 1620.50, 1764.49, 2118.514], gain_range=[0.1, 5.], de_max=50., ) plot_calibrator(cal) ``` Success! The cross-validation used in `AutoCalibrator.fit` was able to find a better match. ## Example 4: CsI detector with Ba-133 and Cs-137 sources This data is from a small detector with Ba-133 and Cs-137 sources near it. We want to use those sources' lines and any strong backgroud lines to calibrate it. 
``` counts = [] filename = os.path.join(os.path.dirname(bq.__file__), '../tests/samples/SGM102432.spe') spec = bq.Spectrum.from_file(filename) plot_spec(spec) kernel = bq.GaussianPeakFilter(2400, 120, 30) finder = bq.PeakFinder(spec, kernel) cal = bq.AutoCalibrator(finder) # find significant peaks cal.peakfinder.find_peaks(min_snr=3, xmin=200) print(cal.peakfinder.centroids) print(cal.peakfinder.snrs) plt.figure() plt.title('find_peaks') cal.peakfinder.plot() plt.tight_layout() cal.fit( [356.0129, 661.657, 1460.82], optional=[911.20, 1120.294, 1764.49, 2614.3], gain_range=[0.5, 0.7], de_max=100., ) plot_calibrator(cal) ``` This last plot reveals that the 1460 keV peak does not quite line up with the calibration, so this detector probably exhibits a significant nonlinearity and would have to be calibrated with a more sophisticated method.
github_jupyter
# Adadelta :label:`sec_adadelta` Adadelta是AdaGrad的另一种变体( :numref:`sec_adagrad`), 主要区别在于前者减少了学习率适应坐标的数量。 此外,广义上Adadelta被称为没有学习率,因为它使用变化量本身作为未来变化的校准。 Adadelta算法是在 :cite:`Zeiler.2012`中提出的。 ## Adadelta算法 简而言之,Adadelta使用两个状态变量,$\mathbf{s}_t$用于存储梯度二阶导数的漏平均值,$\Delta\mathbf{x}_t$用于存储模型本身中参数变化二阶导数的泄露平均值。请注意,为了与其他出版物和实现的兼容性,我们使用作者的原始符号和命名(没有其他真正理由为什么应该使用不同的希腊变量来表示在动量中用于相同用途的参数,即AdaGrad、RMSProp和Adadelta)。 以下是Adadelta的技术细节。鉴于参数du jour是$\rho$,我们获得了与 :numref:`sec_rmsprop`类似的以下泄漏更新: $$\begin{aligned} \mathbf{s}_t & = \rho \mathbf{s}_{t-1} + (1 - \rho) \mathbf{g}_t^2. \end{aligned}$$ 与 :numref:`sec_rmsprop`的区别在于,我们使用重新缩放的梯度$\mathbf{g}_t'$执行更新,即 $$\begin{aligned} \mathbf{x}_t & = \mathbf{x}_{t-1} - \mathbf{g}_t'. \\ \end{aligned}$$ 那么,调整后的梯度$\mathbf{g}_t'$是什么?我们可以按如下方式计算它: $$\begin{aligned} \mathbf{g}_t' & = \frac{\sqrt{\Delta\mathbf{x}_{t-1} + \epsilon}}{\sqrt{{\mathbf{s}_t + \epsilon}}} \odot \mathbf{g}_t, \\ \end{aligned}$$ 其中$\Delta \mathbf{x}_{t-1}$是重新缩放梯度的平方$\mathbf{g}_t'$的泄漏平均值。我们将$\Delta \mathbf{x}_{0}$初始化为$0$,然后在每个步骤中使用$\mathbf{g}_t'$更新它,即 $$\begin{aligned} \Delta \mathbf{x}_t & = \rho \Delta\mathbf{x}_{t-1} + (1 - \rho) {\mathbf{g}_t'}^2, \end{aligned}$$ 和$\epsilon$(例如$10^{-5}$这样的小值)是为了保持数字稳定性而加入的。 ## 代码实现 Adadelta需要为每个变量维护两个状态变量,即$\mathbf{s}_t$和$\Delta\mathbf{x}_t$。这将产生以下实施。 ``` %matplotlib inline from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def init_adadelta_states(feature_dim): s_w, s_b = np.zeros((feature_dim, 1)), np.zeros(1) delta_w, delta_b = np.zeros((feature_dim, 1)), np.zeros(1) return ((s_w, delta_w), (s_b, delta_b)) def adadelta(params, states, hyperparams): rho, eps = hyperparams['rho'], 1e-5 for p, (s, delta) in zip(params, states): # In-placeupdatesvia[:] s[:] = rho * s + (1 - rho) * np.square(p.grad) g = (np.sqrt(delta + eps) / np.sqrt(s + eps)) * p.grad p[:] -= g delta[:] = rho * delta + (1 - rho) * g * g ``` 对于每次参数更新,选择$\rho = 0.9$相当于10个半衰期。由此我们得到: ``` data_iter, feature_dim = d2l.get_data_ch11(batch_size=10) 
d2l.train_ch11(adadelta, init_adadelta_states(feature_dim), {'rho': 0.9}, data_iter, feature_dim); ``` 为了简洁实现,我们只需使用`Trainer`类中的`adadelta`算法。 ``` d2l.train_concise_ch11('adadelta', {'rho': 0.9}, data_iter) ``` ## 小结 * Adadelta没有学习率参数。相反,它使用参数本身的变化率来调整学习率。 * Adadelta需要两个状态变量来存储梯度的二阶导数和参数的变化。 * Adadelta使用泄漏的平均值来保持对适当统计数据的运行估计。 ## 练习 1. 调整$\rho$的值,会发生什么? 1. 展示如何在不使用$\mathbf{g}_t'$的情况下实现算法。为什么这是个好主意? 1. Adadelta真的是学习率为0吗?你能找到Adadelta无法解决的优化问题吗? 1. 将Adadelta的收敛行为与AdaGrad和RMSProp进行比较。 [Discussions](https://discuss.d2l.ai/t/5771)
github_jupyter
# k-Nearest Neighbor (kNN) exercise *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.* The kNN classifier consists of two stages: - During training, the classifier takes the training data and simply remembers it - During testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples - The value of k is cross-validated In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code. ``` # Run some setup code for this notebook. import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 # Load the raw CIFAR-10 data. cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # As a sanity check, we print out the size of the training and test data. 
print('Training data shape: ', X_train.shape) print('Training labels shape: ', y_train.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) # Visualize some examples from the dataset. # We show a few examples of training images from each class. classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(X_train[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # Subsample the data for more efficient code execution in this exercise num_training = 5000 mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] num_test = 500 mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] # Reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) print(X_train.shape, X_test.shape) from cs231n.classifiers import KNearestNeighbor # Create a kNN classifier instance. # Remember that training a kNN classifier is a noop: # the Classifier simply remembers the data and does no further processing classifier = KNearestNeighbor() classifier.train(X_train, y_train) ``` We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps: 1. First we must compute the distances between all test examples and all train examples. 2. Given these distances, for each test example we find the k nearest examples and have them vote for the label Lets begin with computing the distance matrix between all training and test examples. 
For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example. **Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.** First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time. ``` # Open cs231n/classifiers/k_nearest_neighbor.py and implement # compute_distances_two_loops. # Test your implementation: dists = classifier.compute_distances_two_loops(X_test) print(dists.shape) # We can visualize the distance matrix: each row is a single test example and # its distances to training examples plt.imshow(dists, interpolation='none') plt.show() ``` # **Inline Question 1** Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.) - What in the data is the cause behind the distinctly bright rows? - What causes the columns? $\color{blue}{\textit Your Answer:}$ *fill this in.* 1. Each training example's L2 between same test example is different and some are bigger. 2. One training example's L2 between each test example is different and some are bigger. ``` # Now implement the function predict_labels and run the code below: # We use k = 1 (which is Nearest Neighbor). 
y_test_pred = classifier.predict_labels(dists, k=1) # Compute and print the fraction of correctly predicted examples num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) ``` You should expect to see approximately `27%` accuracy. Now lets try out a larger `k`, say `k = 5`: ``` y_test_pred = classifier.predict_labels(dists, k=5) num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) ``` You should expect to see a slightly better performance than with `k = 1`. **Inline Question 2** We can also use other distance metrics such as L1 distance. For pixel values $p_{ij}^{(k)}$ at location $(i,j)$ of some image $I_k$, the mean $\mu$ across all pixels over all images is $$\mu=\frac{1}{nhw}\sum_{k=1}^n\sum_{i=1}^{h}\sum_{j=1}^{w}p_{ij}^{(k)}$$ And the pixel-wise mean $\mu_{ij}$ across all images is $$\mu_{ij}=\frac{1}{n}\sum_{k=1}^np_{ij}^{(k)}.$$ The general standard deviation $\sigma$ and pixel-wise standard deviation $\sigma_{ij}$ is defined similarly. Which of the following preprocessing steps will not change the performance of a Nearest Neighbor classifier that uses L1 distance? Select all that apply. 1. Subtracting the mean $\mu$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu$.) 2. Subtracting the per pixel mean $\mu_{ij}$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu_{ij}$.) 3. Subtracting the mean $\mu$ and dividing by the standard deviation $\sigma$. 4. Subtracting the pixel-wise mean $\mu_{ij}$ and dividing by the pixel-wise standard deviation $\sigma_{ij}$. 5. Rotating the coordinate axes of the data. $\color{blue}{\textit Your Answer:}$ 2, 3, 5 $\color{blue}{\textit Your Explanation:}$ ``` # Now lets speed up distance matrix computation by using partial vectorization # with one loop. 
Implement the function compute_distances_one_loop and run the # code below: dists_one = classifier.compute_distances_one_loop(X_test) # To ensure that our vectorized implementation is correct, we make sure that it # agrees with the naive implementation. There are many ways to decide whether # two matrices are similar; one of the simplest is the Frobenius norm. In case # you haven't seen it before, the Frobenius norm of two matrices is the square # root of the squared sum of differences of all elements; in other words, reshape # the matrices into vectors and compute the Euclidean distance between them. difference = np.linalg.norm(dists - dists_one, ord='fro') print('One loop difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # Now implement the fully vectorized version inside compute_distances_no_loops # and run the code dists_two = classifier.compute_distances_no_loops(X_test) # check that the distance matrix agrees with the one we computed before: difference = np.linalg.norm(dists - dists_two, ord='fro') print('No loop difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # Let's compare how fast the implementations are def time_function(f, *args): """ Call a function f with args and return the time (in seconds) that it took to execute. 
""" import time tic = time.time() f(*args) toc = time.time() return toc - tic two_loop_time = time_function(classifier.compute_distances_two_loops, X_test) print('Two loop version took %f seconds' % two_loop_time) one_loop_time = time_function(classifier.compute_distances_one_loop, X_test) print('One loop version took %f seconds' % one_loop_time) o_loop_time = time_function(classifier.compute_distances_no_loops, X_test) print('No loop version took %f seconds' % no_loop_time) # You should see significantly faster performance with the fully vectorized implementation! # NOTE: depending on what machine you're using, # you might not see a speedup when you go from two loops to one loop, # and might even see a slow-down. ``` ### Cross-validation We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation. ``` num_folds = 5 k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100] X_train_folds = [] y_train_folds = [] ################################################################################ # TODO: # # Split up the training data into folds. After splitting, X_train_folds and # # y_train_folds should each be lists of length num_folds, where # # y_train_folds[i] is the label vector for the points in X_train_folds[i]. # # Hint: Look up the numpy array_split function. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** X_train_folds=np.array_split(X_train,num_folds) y_train_folds=np.array_split(y_train,num_folds) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # A dictionary holding the accuracies for different values of k that we find # when running cross-validation. After running cross-validation, # k_to_accuracies[k] should be a list of length num_folds giving the different # accuracy values that we found when using that value of k. 
k_to_accuracies = {}
################################################################################
# TODO:                                                                        #
# Perform k-fold cross validation to find the best value of k. For each       #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times,  #
# where in each case you use all but one of the folds as training data and the#
# last fold as a validation set. Store the accuracies for all fold and all    #
# values of k in the k_to_accuracies dictionary.                              #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# For every candidate k, hold out each fold in turn, train on the rest,
# and record the validation accuracy on the held-out fold.
for k in k_choices:
    fold_accuracies = []
    for fold in range(num_folds):
        # Training set = all folds except the held-out one.
        fold_train_x = np.concatenate(X_train_folds[:fold] + X_train_folds[fold + 1:], axis=0)
        fold_train_y = np.concatenate(y_train_folds[:fold] + y_train_folds[fold + 1:], axis=0)
        classifier.train(fold_train_x, fold_train_y)
        # Validate on the held-out fold.
        dists = classifier.compute_distances_no_loops(X_train_folds[fold])
        fold_predictions = classifier.predict_labels(dists, k=k)
        fold_accuracies.append(np.mean(fold_predictions == y_train_folds[fold]))
    k_to_accuracies[k] = fold_accuracies

# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# Print out the computed accuracies
for k in sorted(k_to_accuracies):
    for accuracy in k_to_accuracies[k]:
        print('k = %d, accuracy = %f' % (k, accuracy))

# plot the raw observations
for k in k_choices:
    plt.scatter([k] * len(k_to_accuracies[k]), k_to_accuracies[k])

# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(k_to_accuracies[k]) for k in sorted(k_to_accuracies)])
accuracies_std = np.array([np.std(k_to_accuracies[k]) for k in sorted(k_to_accuracies)])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()

# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data.
You should be able to get above 28% accuracy on the test data. best_k = k_choices[np.where(accuracies_mean==accuracies_mean.max())[0][0]] print(best_k) classifier = KNearestNeighbor() classifier.train(X_train, y_train) y_test_pred = classifier.predict(X_test, k=best_k) # Compute and display the accuracy num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) ``` **Inline Question 3** Which of the following statements about $k$-Nearest Neighbor ($k$-NN) are true in a classification setting, and for all $k$? Select all that apply. 1. The decision boundary of the k-NN classifier is linear. 2. The training error of a 1-NN will always be lower than that of 5-NN. 3. The test error of a 1-NN will always be lower than that of a 5-NN. 4. The time needed to classify a test example with the k-NN classifier grows with the size of the training set. 5. None of the above. $\color{blue}{\textit Your Answer:}$ $\color{blue}{\textit Your Explanation:}$
github_jupyter
## ISA Create - Sample Assay Plan as a Graph: Mass Spectrometry Here I am showing how from a JSON-like dictionary describing an MS experiment you can create a full SampleAssayPlan as a graph and visualize how this looks like ``` from isatools.model import * from isatools.create.models import * import networkx as nx import plotly.plotly as py import plotly.graph_objs as go import matplotlib.pyplot as plt import pydot from graphviz import Digraph import pygraphviz %matplotlib inline # from: https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3/29597209 def hierarchy_pos(G, root=None, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5): ''' From Joel's answer at https://stackoverflow.com/a/29597209/2966723. Licensed under Creative Commons Attribution-Share Alike If the graph is a tree this will return the positions to plot this in a hierarchical layout. G: the graph (must be a tree) root: the root node of current branch - if the tree is directed and this is not given, the root will be found and used - if the tree is directed and this is given, then the positions will be just for the descendants of this node. - if the tree is undirected and not given, then a random choice will be used. 
width: horizontal space allocated for this branch - avoids overlap with other branches vert_gap: gap between levels of hierarchy vert_loc: vertical location of root xcenter: horizontal location of root ''' # NOTE: This was commented out for testing with ISA-API output (a DiGraph) # if not nx.is_tree(G): # raise TypeError('cannot use hierarchy_pos on a graph that is not a tree') if root is None: if isinstance(G, nx.DiGraph): root = next(iter(nx.topological_sort(G))) #allows back compatibility with nx version 1.11 else: root = random.choice(list(G.nodes)) def _hierarchy_pos(G, root, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5, pos = None, parent = None): ''' see hierarchy_pos docstring for most arguments pos: a dict saying where all nodes go if they have been assigned parent: parent of this branch. - only affects it if non-directed ''' if pos is None: pos = {root:(xcenter,vert_loc)} else: pos[root] = (xcenter, vert_loc) children = list(G.neighbors(root)) if not isinstance(G, nx.DiGraph) and parent is not None: children.remove(parent) if len(children)!=0: dx = width/len(children) nextx = xcenter - width/2 - dx/2 for child in children: nextx += dx pos = _hierarchy_pos(G,child, width = dx, vert_gap = vert_gap, vert_loc = vert_loc-vert_gap, xcenter=nextx, pos=pos, parent = root) return pos return _hierarchy_pos(G, root, width, vert_gap, vert_loc, xcenter) ``` Here we define the structure of our sampling and assay plan, using a Python dictionary. 
From it we create a full `isatools.create.models.SampleAssayPlan` object ``` ms_assay_dict = OrderedDict([ ('sample', [ { 'node_type': SAMPLE, 'characteristics_category': 'organism part', 'characteristics_value': 'liver', 'size': 1, 'technical_replicates': None, 'is_input_to_next_protocols': True }, { 'node_type': SAMPLE, 'characteristics_category': 'organism part', 'characteristics_value': 'blood', 'size': 5, 'technical_replicates': None, 'is_input_to_next_protocols': True }, { 'node_type': SAMPLE, 'characteristics_category': 'organism part', 'characteristics_value': 'heart', 'size': 1, 'technical_replicates': None, 'is_input_to_next_protocols': True } ]), ('extraction', {}), ('extract', [ { 'node_type': SAMPLE, 'characteristics_category': 'extract type', 'characteristics_value': 'polar fraction', 'size': 1, 'technical_replicates': None, 'is_input_to_next_protocols': True }, { 'node_type': SAMPLE, 'characteristics_category': 'extract type', 'characteristics_value': 'lipids', 'size': 1, 'technical_replicates': None, 'is_input_to_next_protocols': True } ]), ('labeling', {}), ('labeled_extract', [ { 'node_type': SAMPLE, 'characteristics_category': 'labeled extract type', 'characteristics_value': '', 'size': 2, 'technical_replicates': None, 'is_input_to_next_protocols': True } ]), ('mass_spectrometry', { 'instrument': ['Agilent QTQF §'], 'injection_mode': ['FIA', 'LC'], 'acquisition_mode': ['positive mode'] }), ('raw_spectral_data_file', [ { 'node_type': DATA_FILE, 'size': 1, 'technical_replicates': 2, 'is_input_to_next_protocols': False } ]) ]) ms_assay_plan = SampleAndAssayPlan.from_sample_and_assay_plan_dict(ms_assay_dict) ``` The `ms_assay_plan` object is a graph. Let's which are its `nodes`. 
``` nx_graph = ms_assay_plan.as_networkx_graph() # set(nx_graph.nodes) nx_graph.number_of_nodes() ``` Here we print the `links` or `edges` of the graph ``` # set(nx_graph.edges) nx_graph.number_of_edges() nx_graph.size() ``` We output is as a `networkx` graph and we visualize it using `matplotlib` ``` G=nx_graph # nx.draw(G) nx.draw(nx_graph,pos=nx.spring_layout(G),node_color=range(G.number_of_nodes()),cmap=plt.cm.Blues, with_labels=False) SG1 = G.subgraph(['sample_000','extraction_000_000','extract_000_000','extract_001_000','labeling_000_000','labeling_000_003','labeled_extract_000_000','labeled_extract_000_003']) # print(list(SG.edges)) pos1 = hierarchy_pos(SG1,'sample_000') nx.draw(SG1, pos=pos1, with_labels=True,node_color = 'b') plt.savefig('hierarchy1.png') # SG2 = G.subgraph(['sample_001','extraction_000_001','extract_000_001','extract_001_001','labeling_000_001','labeling_000_004','labeled_extract_000_001','labeled_extract_000_004']) # # print(list(SG.edges)) # pos2 = hierarchy_pos(SG2,'sample_001') # nx.draw(SG2, pos=pos2, with_labels=True,node_color = 'pink') # plt.savefig('hierarchy2.png') # Generating a graphviz compatible output dot = Digraph() for node in nx_graph.nodes: dot.node(node) dot.edges(nx_graph.edges) filename=dot.filename # print(dot.source) dot.graph_attr['rankdir'] = 'LR' # to layout from left to right (horizontal), rather than top to bottom (vertical) dot.render(filename, view=True) # nx.draw_networkx_edges(nx_graph,pos=nx.spring_layout(nx_graph)) # fig = go.Figure(data=[nx_graph.nodes,nx_graph.edges]) # nx.draw(nx_graph, with_labels=False, font_weight='bold') nx.drawing.draw_planar(nx_graph,node_color=range(G.number_of_nodes()),cmap=plt.cm.Blues, style='dashed') nx.nx_agraph.to_agraph(nx_graph).layout() nx.nx_agraph.to_agraph(nx_graph).write("isa-test.dot") G=nx.nx_agraph.read_dot("isa-test.dot") # G = nx.bipartite.gnmk_random_graph(3, 5, 10, seed=123) # top = nx.bipartite.sets(G)[3] # pos = nx.bipartite_layout(G, top) # pos = 
nx.planar_layout(G) pos=nx.drawing.layout.planar_layout(G, scale=2, center=None, dim=2) nx.draw(nx_graph,pos=nx.drawing.layout.planar_layout(G, scale=1, center=None, dim=2),node_color=range(G.number_of_nodes()),cmap=plt.cm.Blues) NG = nx.karate_club_graph() res = [0,1,2,3,4,5, 'parrot'] #I've added 'parrot', a node that's not in G #just to demonstrate that G.subgraph is okay #with nodes not in G. k = NG.subgraph(res) pos = nx.spring_layout(NG) #setting the positions with respect to G, not k. plt.figure() nx.draw_networkx(k, pos=pos) othersubgraph = NG.subgraph(range(6,NG.order())) nx.draw_networkx(othersubgraph, pos=pos, node_color = 'pink') plt.show() ```
github_jupyter
### Introduction An example of implementing the Metapath2Vec representation learning algorithm using components from the `stellargraph` and `gensim` libraries. **References** **1.** Metapath2Vec: Scalable Representation Learning for Heterogeneous Networks. Yuxiao Dong, Nitesh V. Chawla, and Ananthram Swami. ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), 135–144, 2017. ([link](https://ericdongyx.github.io/papers/KDD17-dong-chawla-swami-metapath2vec.pdf)) **2.** Distributed representations of words and phrases and their compositionality. T. Mikolov, I. Sutskever, K. Chen, G. S. Corrado, and J. Dean. In Advances in Neural Information Processing Systems (NIPS), pp. 3111-3119, 2013. ([link](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf)) **3.** Gensim: Topic modelling for humans. ([link](https://radimrehurek.com/gensim/)) **4.** Social Computing Data Repository at ASU [http://socialcomputing.asu.edu]. R. Zafarani and H. Liu. Tempe, AZ: Arizona State University, School of Computing, Informatics and Decision Systems Engineering. 2009. ``` import matplotlib.pyplot as plt from sklearn.manifold import TSNE from sklearn.decomposition import PCA import os import networkx as nx import numpy as np import pandas as pd from stellargraph.data.loader import load_dataset_BlogCatalog3 %matplotlib inline ``` ### Load the dataset The dataset is the BlogCatalog3 network. It can be downloaded from [here.](http://socialcomputing.asu.edu/datasets/BlogCatalog3) The following is the description of the dataset from the publisher [4]: > This is the data set crawled from BlogCatalog ( http://www.blogcatalog.com ). BlogCatalog is a social blog directory website. This contains the friendship network crawled and group memberships. For easier understanding, all the contents are organized in CSV file format. 
The statistics of this network are, - Number of bloggers : 10,312 - Number of friendship pairs: 333,983 - Number of groups: 39 We assume that the dataset file `BlogCatalog-dataset.zip` has been downloaded and unzipped in the directory, `~/data` and the data in `csv` format (the files `edges.csv`, `nodes.csv`, `groups.csv`, and `group-edges.csv` can be found in directory, `~/data/BlogCatalog-dataset/data/` ``` dataset_location = os.path.expanduser("~/data/BlogCatalog-dataset/data") g_nx = load_dataset_BlogCatalog3(location=dataset_location) print("Number of nodes {} and number of edges {} in graph.".format(g_nx.number_of_nodes(), g_nx.number_of_edges())) ``` ### The Metapath2Vec algorithm The Metapath2Vec algorithm introduced in [1] is a 2-step representation learning algorithm. The two steps are: 1. Use uniform random walks to generate sentences from a graph. A sentence is a list of node IDs. The set of all sentences makes a corpus. The random walk is driven by a metapath that defines the node type order by which the random walker explores the graph. 2. The corpus is then used to learn an embedding vector for each node in the graph. Each node ID is considered a unique word/token in a dictionary that has size equal to the number of nodes in the graph. The Word2Vec algorithm [2] is used for calculating the embedding vectors. ## Corpus generation using random walks The `stellargraph` library provides an implementation for uniform, first order, random walks as required by Metapath2Vec. The random walks have fixed maximum length and are controlled by the list of metapath schemas specified in parameter `metapaths`. A metapath schema defines the type of node that the random walker is allowed to transition to from its current location. In the `stellargraph` implementation of metapath-driven random walks, the metapath schemas are given as a list of node types under the assumption that the input graph is not a multi-graph, i.e., two nodes are only connected by one edge type. 
See [1] for a detailed description of metapath schemas and metapth-driven random walks. For the **BlogCatalog3** dataset we use the following 3 metapaths. - "user", "group", "user" - "user", "group", "user", "user" - "user", "user" ``` from stellargraph.data import UniformRandomMetaPathWalk from stellargraph import StellarGraph # Create the random walker rw = UniformRandomMetaPathWalk(StellarGraph(g_nx)) # specify the metapath schemas as a list of lists of node types. metapaths = [ ["user", "group", "user"], ["user", "group", "user", "user"], ["user", "user"], ] walks = rw.run(nodes=list(g_nx.nodes()), # root nodes length=100, # maximum length of a random walk n=1, # number of random walks per root node metapaths=metapaths # the metapaths ) print("Number of random walks: {}".format(len(walks))) ``` ### Representation Learning using Word2Vec We use the Word2Vec [2] implementation in the free Python library gensim [3] to learn representations for each node in the graph. We set the dimensionality of the learned embedding vectors to 128 as in [1]. ``` from gensim.models import Word2Vec model = Word2Vec(walks, size=128, window=5, min_count=0, sg=1, workers=2, iter=1) model.wv.vectors.shape # 128-dimensional vector for each node in the graph ``` ### Visualise Node Embeddings We retrieve the Word2Vec node embeddings that are 128-dimensional vectors and then we project them down to 2 dimensions using the [t-SNE](http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html) algorithm. 
``` # Retrieve node embeddings and corresponding subjects node_ids = model.wv.index2word # list of node IDs node_embeddings = model.wv.vectors # numpy.ndarray of size number of nodes times embeddings dimensionality node_targets = [ g_nx.nodes[node_id]['label'] for node_id in node_ids] ``` Transform the embeddings to 2d space for visualisation ``` transform = TSNE #PCA trans = transform(n_components=2) node_embeddings_2d = trans.fit_transform(node_embeddings) # draw the points label_map = { l: i for i, l in enumerate(np.unique(node_targets))} node_colours = [ label_map[target] for target in node_targets] plt.figure(figsize=(20,16)) plt.axes().set(aspect="equal") plt.scatter(node_embeddings_2d[:,0], node_embeddings_2d[:,1], c=node_colours, alpha=0.3) plt.title('{} visualization of node embeddings'.format(transform.__name__)) plt.show() ``` ### Downstream task The node embeddings calculated using Metapath2Vec can be used as feature vectors in a downstream task such as node attribute inference (e.g., inferring the gender or age attribute of 'user' nodes), community detection (e.g., clustering of 'user' nodes based on the similarity of their embedding vectors), and link prediction (e.g., prediction of friendship relation between 'user' nodes).
github_jupyter
# Using Astropy Quantities and Units for astrophysical calculations ## Authors Ana Bonaca, Erik Tollerud, Jonathan Foster, Lia Corrales, Kris Stern, Stephanie T. Douglas ## Learning Goals * Use `Quantity` objects to estimate a hypothetical galaxy's mass * Take advantage of constants in the `astropy.constants` library * Print formatted unit strings * Plot `Quantity` objects with unit labels, using `astropy.visualization.quantity_support` * Do math with `Quantity` objects * Convert quantities with `astropy.units` * Convert between wavelength and energy with `astropy.units.spectral` equivalencies * Use the small angle approximation with `astropy.units.dimensionless_angles` equivalencies * Write functions that take `Quantity` objects instead of numpy arrays * Make synthetic radio observations * Use `Quantity` objects such as data cubes to facilitate a full derivation of the total mass of a molecular cloud ## Keywords units, radio astronomy, data cubes, matplotlib ## Companion Content [Tools for Radio Astronomy](https://www.springer.com/gp/book/9783662053942) by Rohlfs & Wilson ## Summary In this tutorial we present some examples showing how Astropy's `Quantity` object can make astrophysics calculations easier. The examples include calculating the mass of a galaxy from its velocity dispersion and determining masses of molecular clouds from CO intensity maps. We end with an example of good practices for using quantities in functions you might distribute to other people. For an in-depth discussion of `Quantity` objects, see the [astropy documentation section](http://docs.astropy.org/en/stable/units/quantity.html). ## Preliminaries We start by loading standard libraries and set up plotting for ipython notebooks. ``` import numpy as np import matplotlib.pyplot as plt # You shouldn't use the `seed` function in real science code, but we use it here for example purposes. # It makes the "random" number generator always give the same numbers wherever you run it. 
np.random.seed(12345) # Set up matplotlib import matplotlib.pyplot as plt %matplotlib inline ``` It's conventional to load the Astropy `units` module as the variable `u`, demonstrated below. This will make working with `Quantity` objects much easier. Astropy also has a `constants` module where typical physical constants are available. The constants are stored as objects of a subclass of `Quantity`, so they behave just like a `Quantity`. Here, we'll only need the gravitational constant `G`, Planck's constant `h`, and Boltzmann's constant, `k_B`. ``` import astropy.units as u from astropy.constants import G, h, k_B ``` We will also show an example of plotting while taking advantage of the `astropy.visualization` package, which provides support for `Quantity` units. ``` from astropy.visualization import quantity_support ``` ## 1. Galaxy mass In this first example, we will use `Quantity` objects to estimate a hypothetical galaxy's mass, given its half-light radius and radial velocities of stars in the galaxy. Let's assume that we measured the half-light radius of the galaxy to be 29 pc projected on the sky at the distance of the galaxy. This radius is often called the "effective radius", so we'll store it as a `Quantity` object with the name `Reff`. The easiest way to create a `Quantity` object is by multiplying the value with its unit. Units are accessed as u."unit", in this case u.pc. ``` Reff = 29 * u.pc ``` A completely equivalent (but more verbose) way of doing the same thing is to use the `Quantity` object's initializer, demonstrated below. In general, the simpler form (above) is preferred, as it is closer to how such a quantity would actually be written in text. The initalizer form has more options, though, which you can learn about from the [astropy reference documentation on Quantity](http://docs.astropy.org/en/stable/api/astropy.units.quantity.Quantity.html). 
``` Reff = u.Quantity(29, unit=u.pc) ``` We can access the value and unit of a `Quantity` using the `value` and `unit` attributes. ``` print("""Half light radius value: {0} unit: {1}""".format(Reff.value, Reff.unit)) ``` The `value` and `unit` attributes can also be accessed within the print function. ``` print("""Half light radius value: {0.value} unit: {0.unit}""".format(Reff)) ``` Furthermore, we can convert the radius in parsecs to any other unit of length using the ``to()`` method. Here, we convert it to meters. ``` print("{0:.3g}".format(Reff.to(u.m))) ``` Next, we'll first create a synthetic dataset of radial velocity measurements, assuming a normal distribution with a mean velocity of 206 km/s and a velocity dispersion of 4.3 km/s. ``` vmean = 206 sigin = 4.3 v = np.random.normal(vmean, sigin, 500)*u.km/u.s print("""First 10 radial velocity measurements: {0} {1}""".format(v[:10], v.to(u.m/u.s)[:10])) ``` One can ocassionally run into issues when attempting to plot `Quantity` objects with `matplotlib` libraries. It is always possible to fix this by passing the value array (e.g., `v.value`) to `matplotlib` functions. However, calling the `astropy.visualization.quantity_support()` function will change the settings on your `matplotlib` session to better handle astropy `Quantity` objects: ``` quantity_support() ``` Now we can plot a histogram of the velocity dataset. Note that, due to calling `quantity_support`, the x-axis is automatically labeled with the correct units. ``` plt.figure() plt.hist(v, bins='auto', histtype="step") plt.ylabel("N") ``` Now we can calculate the velocity dispersion of the galaxy. This demonstrates how you can perform basic operations like subtraction and division with `Quantity` objects, and also use them in standard numpy functions such as `mean()` and `size()`. They retain their units through these operations just as you would expect them to. 
``` sigma = np.sqrt(np.sum((v - np.mean(v))**2) / np.size(v)) print("Velocity dispersion: {0:.2f}".format(sigma)) ``` Note how we needed to use `numpy` square root function, because the resulting velocity dispersion quantity is a `numpy` array. If we used the python standard `math` library's `sqrt` function instead, we get an error. ``` sigma_scalar = np.sqrt(np.sum((v - np.mean(v))**2) / len(v)) ``` In general, you should only use `numpy` functions with `Quantity` objects, *not* the `math` equivalents, unless you are sure you understand the consequences. Now for the actual mass calculation. If a galaxy is pressure-supported (for example, an elliptical or dwarf spheroidal galaxy), its mass within the stellar extent can be estimated using a straightforward formula: $M_{1/2}=4\sigma^2 R_{eff}/G$. There are caveats to the use of this formula for science -- see Wolf et al. 2010 for details. For demonstrating `Quantity`, you can accept that this is often good enough. For the calculation, we can multiply the quantities together, and `astropy` will keep track of the units. ``` M = 4*sigma**2*Reff/G M ``` The result is in a composite unit, so it's not really obvious it's a mass. However, it can be decomposed to cancel all of the length units ($km^2 pc/m^3$) using the `decompose()` method. ``` M.decompose() ``` We can also easily express the mass in whatever form you like -- solar masses are common in astronomy, or maybe you want the default SI and CGS units. ``` print("""Galaxy mass in solar units: {0:.3g} SI units: {1:.3g} CGS units: {2:.3g}""".format(M.to(u.Msun), M.si, M.cgs)) ``` Or, if you want the log of the mass, you can just use ``np.log10`` as long as the logarithm's argument is dimensionless. ``` np.log10(M.to_value(u.Msun)) ``` However, you can't take the log of something with units, as that is not mathematically sensible. 
``` np.log10(M) ``` ## Exercises Use `Quantity` and Kepler's law in the form given below to determine the (circular) orbital speed of the Earth around the sun in km/s. No need to look up constants or conversion factors to do this calculation -- it's all in `astropy.units` and `astropy.constants`. $$v = \sqrt{\frac{G M_{\odot}}{r}}$$ There's a much easier way to figure out the velocity of the Earth using just two units or quantities. Do that and then compare to the Kepler's law answer (the easiest way is probably to compute the percentage difference, if any). (Completely optional, but a good way to convince yourself of the value of Quantity:) Do the above calculations by hand -- you can use a calculator (or python just for its arithmatic) but look up all the appropriate conversion factors and use paper-and-pencil approaches for keeping track of them all. Which one took longer? ## 2. Molecular cloud mass In this second example, we will demonstrate how using `Quantity` objects can facilitate a full derivation of the total mass of a molecular cloud using radio observations of isotopes of Carbon Monoxide (CO). #### Setting up the data cube Let's assume that we've mapped the inner part of a molecular cloud in the J=1-0 rotational transition of ${\rm C}^{18}{\rm O}$ and are interested in measuring its total mass. The measurement produced a data cube with RA and Dec as spatial coordiates and velocity as the third axis. Each voxel in this data cube represents the brightness temperature of the emission at that position and velocity. Furthermore, we'll assume that we have an independent measurement of distance to the cloud $d=250$ pc and that the excitation temperature is known and constant throughout the cloud: $T_{ex}=25$ K. ``` d = 250 * u.pc Tex = 25 * u.K ``` We'll generate a synthetic dataset, assuming the cloud follows a Gaussian distribution in each of RA, Dec and velocity. 
We start by creating a 100x100x300 numpy array, such that the first coordinate is right ascension, the second is declination, and the third is velocity. We use the `numpy.meshgrid` function to create data cubes for each of the three coordinates, and then use them in the formula for a Gaussian to generate an array with the synthetic data cube. In this cube, the cloud is positioned at the center of the cube, with $\sigma$ and the center in each dimension shown below. Note in particular that the $\sigma$ for RA and Dec have different units from the center, but `astropy` automatically does the relevant conversions before computing the exponential. ``` # Cloud's center cen_ra = 52.25 * u.deg cen_dec = 0.25 * u.deg cen_v = 15 * u.km/u.s # Cloud's size sig_ra = 3 * u.arcmin sig_dec = 4 * u.arcmin sig_v = 3 * u.km/u.s #1D coordinate quantities ra = np.linspace(52, 52.5, 100) * u.deg dec = np.linspace(0, 0.5, 100) * u.deg v = np.linspace(0, 30, 300) *u.km/u.s #this creates data cubes of size for each coordinate based on the dimensions of the other coordinates ra_cube, dec_cube, v_cube = np.meshgrid(ra, dec, v) data_gauss = np.exp(-0.5*((ra_cube-cen_ra)/sig_ra)**2 + -0.5*((dec_cube-cen_dec)/sig_dec)**2 + -0.5*((v_cube-cen_v)/sig_v)**2 ) ``` The units of the exponential are dimensionless, so we multiply the data cube by K to get brightness temperature units. Radio astronomers use a rather odd set of units [K km/s] as of integrated intensity (that is, summing all the emission from a line over velocity). As an aside for experts, we're setting up our artificial cube on the main-beam temperature scale (T$_{\rm MB}$) which is the closest we can normally get to the actual brightness temperature of our source. ``` data = data_gauss * u.K ``` We will also need to know the width of each velocity bin and the size of each pixel, so let's calculate that now. ``` # Average pixel size # This is only right if dec ~ 0, because of the cos(dec) factor. 
dra = (ra.max() - ra.min()) / len(ra) ddec = (dec.max() - dec.min()) / len(dec) #Average velocity bin width dv = (v.max() - v.min()) / len(v) print("""dra = {0} ddec = {1} dv = {2}""".format(dra.to(u.arcsec), ddec.to(u.arcsec), dv)) ``` We're interested in the integrated intensity over all of the velocity channels, so let's create a 2D quantity array by summing our data cube along the velocity axis (multiplying by the velocity width of a pixel). ``` intcloud = np.sum(data*dv, axis=2) intcloud.unit ``` We can plot the 2D quantity using matplotlib's imshow function, by passing the quantity's value. Similarly, we can set the correct extent using the values of $x_i$ and $x_f$. Finally, we can set the colorbar label to have proper units. ``` #Note that we display RA in the convential way by going from max to min plt.imshow(intcloud.value, origin='lower', extent=[ra.value.max(), ra.value.min(), dec.value.min(), dec.value.max()], cmap='hot', interpolation='nearest', aspect='equal') plt.colorbar().set_label("Intensity ({})".format(intcloud.unit)) plt.xlabel("RA (deg)") plt.ylabel("Dec (deg)"); ``` #### Measuring The Column Density of CO In order to calculate the mass of the molecular cloud, we need to measure its column density. A number of assumptions are required for the following calculation; the most important are that the emission is optically thin (typically true for ${\rm C}^{18}{\rm O}$) and that conditions of local thermodynamic equilibrium hold along the line of sight. 
In the case where the temperature is large compared to the separation in energy levels for a molecule and the source fills the main beam of the telescope, the total column density for ${\rm C}^{13}{\rm O}$ is $N=C \frac{\int T_B(V) dV}{1-e^{-B}}$ where the constants $C$ and $B$ are given by: $C=3.0\times10^{14} \left(\frac{\nu}{\nu_{13}}\right)^2 \frac{A_{13}}{A} {\rm K^{-1} cm^{-2} \, km^{-1} \, s}$ $B=\frac{h\nu}{k_B T}$ (Rohlfs & Wilson [Tools for Radio Astronomy](https://www.springer.com/gp/book/9783662053942)). Here we have given an expression for $C$ scaled to the values for ${\rm C}^{13}{\rm O}$ ($\nu_{13}$ and $A_{13}$). In order to use this relation for ${\rm C}^{18}{\rm O}$, we need to rescale the frequencies ${\nu}$ and Einstein coefficients $A$. $C$ is in funny mixed units, but that's okay. We'll define it as a `Quantities` object and not have to worry about it. First, we look up the wavelength for these emission lines and store them as quantities. ``` lambda13 = 2.60076 * u.mm lambda18 = 2.73079 * u.mm ``` Since the wavelength and frequency of light are related using the speed of light, we can convert between them. However, doing so just using the `to()` method fails, as units of length and frequency are not convertible: ``` nu13 = lambda13.to(u.Hz) nu18 = lambda18.to(u.Hz) ``` Fortunately, `astropy` comes to the rescue by providing a feature called "unit equivalencies." Equivalencies provide a way to convert between two physically different units that are not normally equivalent, but in a certain context have a one-to-one mapping. For more on equivalencies, see the [equivalencies section of astropy's documentation](http://docs.astropy.org/en/stable/units/equivalencies.html). In this case, calling the ``astropy.units.spectral()`` function provides the equivalencies necessary to handle conversions between wavelength and frequency. 
To use it, provide the equivalencies to the `equivalencies` keyword of the ``to()`` call: ``` nu13 = lambda13.to(u.Hz, equivalencies=u.spectral()) nu18 = lambda18.to(u.Hz, equivalencies=u.spectral()) ``` Next, we look up Einstein coefficients (in units of s$^{-1}$), and calculate the ratios in constant $C$. Note how the ratios of frequency and Einstein coefficient units are dimensionless, so the unit of $C$ is unchanged. ``` nu13 = 115271096910.13396 * u.Hz nu18 = 109782318669.689 * u.Hz A13 = 7.4e-8 / u.s A18 = 8.8e-8 / u.s C = 3e14 * (nu18/nu13)**3 * (A13/A18) / (u.K * u.cm**2 * u.km *(1/u.s)) C ``` Now we move on to calculate the constant $B$. This is given by the ratio of $\frac{h\nu}{k_B T}$, where $h$ is Planck's constant, $k_B$ is the Boltzmann's constant, $\nu$ is the emission frequency, and $T$ is the excitation temperature. The constants were imported from `astropy.constants`, and the other two values are already calculated, so here we just take the ratio. ``` B = h * nu18 / (k_B * Tex) ``` The units of $B$ are Hz sec, which can be decomposed to a dimensionless unit if you actually care about its value. Usually this is not necessary, though. Quantities are at their best if you use them without worrying about intermediate units, and only convert at the very end when you want a final answer. ``` print('{0}\n{1}'.format(B, B.decompose())) ``` At this point we have all the ingredients to calculate the number density of $\rm CO$ molecules in this cloud. We already integrated (summed) over the velocity channels above to show the integrated intensity map, but we'll do it again here for clarity. This gives us the column density of CO for each spatial pixel in our map. We can then print out the peak column column density. ``` NCO = C * np.sum(data*dv, axis=2) / (1 - np.exp(-B)) print("Peak CO column density: ") np.max(NCO) ``` #### CO to Total Mass We are using CO as a tracer for the much more numerous H$_2$, the quantity we are actually trying to infer. 
Since most of the mass is in H$_2$, we calculate its column density by multiplying the CO column density with the (known/assumed) H$_2$/CO ratio. ``` H2_CO_ratio = 5.9e6 NH2 = NCO * H2_CO_ratio print("Peak H2 column density: ") np.max(NH2) ``` That's a peak column density of roughly 50 magnitudes of visual extinction (assuming the conversion between N$_{\rm H_2}$ and A$_V$ from Bohlin et al. 1978), which seems reasonable for a molecular cloud. We obtain the mass column density by multiplying the number column density by the mass of an individual H$_2$ molecule. ``` mH2 = 2 * 1.008 * u.Dalton #aka atomic mass unit/amu rho = NH2 * mH2 ``` A final step in going from the column density to mass is summing up over the area. If we do this in the straightforward way of length x width of a pixel, this area is then in units of ${\rm deg}^2$. ``` dap = dra * ddec print(dap) ``` Now comes an important subtlety: in the small angle approximation, multiplying the pixel area with the square of distance yields the cross-sectional area of the cloud that the pixel covers, in *physical* units, rather than angular units. So it's tempting to just multiply the area and the square of the distance. ``` da = dap * d**2 # don't actually do it this way - use the version below instead! print(da) dap.to(u.steradian).value * d**2 ``` But this is **wrong**, because `astropy.units` treats angles (and solid angles) as actual physical units, while the small-angle approximation assumes angles are dimensionless. So if you, e.g., try to convert to a different area unit, it will fail: ``` da.to(u.cm**2) ``` The solution is to use the `dimensionless_angles` equivalency, which allows angles to be treated as dimensionless. This makes it so that they will automatically convert to radians and become dimensionless when a conversion is needed.
def response_func(xinarcsec, yinarcsec):
    """Analytic estimate of the instrument response at offset (x, y).

    NOTE: both inputs are *assumed* to be in arcsec -- nothing enforces it,
    which is exactly the pitfall this example is meant to illustrate.
    """
    x_scale_arcsec = 0.9
    y_scale_arcsec = 0.85
    fx = 1 / (1 + xinarcsec / x_scale_arcsec)
    fy = 1 / (1 + yinarcsec / y_scale_arcsec)
    return fx * fy
def response_func(x, y):
    """Analytic estimate of the instrument response at angular offset (x, y).

    x, y -- angles as `Quantity` objects; any angular unit works, since the
    division by the arcsec-valued scales yields dimensionless factors.
    """
    x_scale = 0.9 * u.arcsec
    y_scale = 0.85 * u.arcsec
    fx = 1 / (1 + x / x_scale)
    fy = 1 / (1 + y / y_scale)
    return fx * fy
github_jupyter
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Train a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- regularization hyperparameter, scalar (0 disables L2 regularization)
    keep_prob -- probability of keeping a neuron active during drop-out (1 disables dropout)

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """
    grads = {}
    costs = []                           # cost history, sampled every 1000 iterations
    m = X.shape[1]                       # number of examples
    layers_dims = [X.shape[0], 20, 3, 1]

    # Initialize parameters dictionary.
    parameters = initialize_parameters(layers_dims)

    # Gradient-descent loop.
    for i in range(0, num_iterations):

        # Forward pass -- plain or with dropout.
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost -- plain cross-entropy or with the L2 penalty added.
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # It is possible to use both L2 regularization and dropout,
        # but this assignment explores only one at a time.
        assert (lambd == 0 or keep_prob == 1)

        # Backward pass matching the forward/cost variant used above.
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # Parameter update.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Logging.
        if print_cost and i % 10000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
        if print_cost and i % 1000 == 0:
            costs.append(cost)

    # Learning-curve plot.
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# GRADED FUNCTION: compute_cost_with_regularization

def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Implement the cost function with L2 regularization. See formula (2) above.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model ("W1", "W2", "W3", ...)
    lambd -- regularization hyperparameter, scalar

    Returns:
    cost - value of the regularized loss function (formula (2))
    """
    m = Y.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    W3 = parameters["W3"]

    cross_entropy_cost = compute_cost(A3, Y)  # the cross-entropy part of the cost

    ### START CODE HERE ### (approx. 1 line)
    # Frobenius-norm penalty summed over every weight matrix, scaled by lambda/(2m).
    L2_regularization_cost = 1. / m * lambd / 2. * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
    ### END CODE HERE ###

    cost = cross_entropy_cost + L2_regularization_cost

    return cost
# GRADED FUNCTION: backward_propagation_with_regularization

def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Backward pass of the baseline 3-layer model with the gradient of the
    L2 penalty added: each dW gains an extra (lambda/m) * W term.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter,
                 activation and pre-activation variables
    """
    num_examples = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    grads = {}

    # Output layer (sigmoid + cross-entropy gives dZ3 = A3 - Y directly).
    grads["dZ3"] = A3 - Y
    grads["dW3"] = 1. / num_examples * np.dot(grads["dZ3"], A2.T) + lambd / num_examples * W3
    grads["db3"] = 1. / num_examples * np.sum(grads["dZ3"], axis=1, keepdims=True)

    # Hidden layer 2 (ReLU): zero the gradient where the activation was <= 0.
    grads["dA2"] = np.dot(W3.T, grads["dZ3"])
    grads["dZ2"] = np.multiply(grads["dA2"], np.int64(A2 > 0))
    grads["dW2"] = 1. / num_examples * np.dot(grads["dZ2"], A1.T) + lambd / num_examples * W2
    grads["db2"] = 1. / num_examples * np.sum(grads["dZ2"], axis=1, keepdims=True)

    # Hidden layer 1 (ReLU).
    grads["dA1"] = np.dot(W2.T, grads["dZ2"])
    grads["dZ1"] = np.multiply(grads["dA1"], np.int64(A1 > 0))
    grads["dW1"] = 1. / num_examples * np.dot(grads["dZ1"], X.T) + lambd / num_examples * W1
    grads["db1"] = 1. / num_examples * np.sum(grads["dZ1"], axis=1, keepdims=True)

    return grads
`model()` 现在将会使用: - `compute_cost_with_regularization()` 替代 `compute_cost()` - `backward_propagation_with_regularization()` 替代 `backward_propagation()` ``` parameters = model(train_X, train_Y, lambd = 0.7) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) ``` 可以看到准确率已经被提升至93%!成功解决了过拟合的问题,现在让我们输出决策边界。 ``` plt.title("Model with L2-regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` **结论**: - 参数 $\lambda$是一个超参数 - L2正则化让我们的模型更加平滑. 如果 $\lambda$ 太大, 会导致模型过度平滑(oversmooth), 导致模型产生很大的训练误差。 **L2正则化究竟在做什么?**: L2正则化依赖于这样的假设,即具有小权重的模型比具有大权重的模型简单。 因此,通过惩罚成本函数中权重的平方值,可以将所有权重削减到较小的值,使得模型更加平滑,从而减少过拟合问题。 <font color='blue'> **你应该记住** -- L2正则化的实现: - 成本函数cost的计算: - cost中添加了一个正则化项 - 反向传播: - 在梯度下降中减去一个额外项 - 权重值变小了 ("weight decay"): - 权重被削减为更小的值. ## 3 - Dropout 最后, **dropout** 是一个广泛运用的解决过拟合的方法。 **在每次迭代中随机“关闭”一些节点** Dropout让我们在每次迭代中只使用神经元中的一部分,它让我们不对某个神经元过分敏感,因为每个神经元都有几率会被“关闭”,从而减少过拟合的问题。 ### 3.1 - Forward propagation with dropout **练习**: 实现dropout前向传播。使用一个三层网络,并在第一层和第二层加入dropout操作,因为我们不会在输入层和输出层使用dropout。 **指导**: 实现dropout操作,你需要进行以下四个步骤: 1. 我们设置变量 $d^{[1]}$ ,使用和 $a^{[1]}$ 一样的数据形状,并用 `np.random.rand()` 初始化为0到1之间。 使用向量化操作来构造 $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ ,使用和 $A^{[1]}$一样的数据形状。 2. 将$D^{[1]}$ 中的值以(`1-keep_prob`)的概率设置为0,或者以(`keep_prob`)的概率设置成1。提示: 如果要将X中所有的值设置成0 (如果值<0.5) 或者 1 (如果值>0.5),你可以这么做: `X = (X < 0.5)`. 0和1分别相应地代表False和True。 3. 设置 $A^{[1]}$ 为 $A^{[1]} * D^{[1]}$. (因为我们“关闭”了一些神经元). 可以将 $D^{[1]}$ 当做掩码矩阵(mask), 当乘以这个掩码矩阵时,就起到了“关闭”部分神经元的作用。 4. 让 $A^{[1]}$ 除以 `keep_prob`. 这样做使得使用dropout最后输出的期望值和未使用dropout的值相近. (这样做被称作inverted dropout.) 
# GRADED FUNCTION: forward_propagation_with_dropout

def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Forward pass with inverted dropout on both hidden layers:
    LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (20, 2)
                    b1 -- bias vector of shape (20, 1)
                    W2 -- weight matrix of shape (3, 20)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)
    keep_prob - probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1,1)
    cache -- tuple, information stored for computing the backward propagation
    """
    np.random.seed(1)  # fixed seed so the graded output is reproducible

    # Retrieve parameters.
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # Hidden layer 1 with inverted dropout.
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    D1 = np.random.rand(A1.shape[0], A1.shape[1])  # random mask, same shape as A1
    D1 = D1 < keep_prob                            # keep a unit with probability keep_prob
    A1 = np.multiply(A1, D1)                       # shut down the masked units
    A1 = A1 / keep_prob                            # inverted dropout: preserve expected value

    # Hidden layer 2 with inverted dropout (same four steps).
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    D2 = np.random.rand(A2.shape[0], A2.shape[1])
    D2 = D2 < keep_prob
    A2 = np.multiply(A2, D2)
    A2 = A2 / keep_prob

    # Output layer (no dropout on the output).
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)

    return A3, cache
D2 = D2 < keep_prob # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold) A2 = np.multiply(A2, D2) # Step 3: shut down some neurons of A2 A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down ### END CODE HERE ### Z3 = np.dot(W3, A2) + b3 A3 = sigmoid(Z3) cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) return A3, cache X_assess, parameters = forward_propagation_with_dropout_test_case() A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7) print ("A3 = " + str(A3)) ``` **期望的输出**: <table> <tr> <td> **A3** </td> <td> [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]] </td> </tr> </table> ### 3.2 - Backward propagation with dropout **练习**: 实现带dropout的反向传播. 和之前一样,我们在训练一个三层的神经网络. 使用存在cache中的 $D^{[1]}$ and $D^{[2]}$ 来给第一层和第二层神经元添加dropout。 **指导**: 带dropout的反向传播实现很简单,只需两步: 1. 在前向传播中我们利用 $D^{[1]}$ 来关闭了 `A1`中的部分神经元。在后向传播者, 我们需要使用一样的 $D^{[1]}$ 来关闭 `dA1`中的部分神经元. 2. 在前向传播中, 我们让 `A1` 除以 `keep_prob`。所以在后向传播中, 你也需要让 `dA1` 除以 `keep_prob` 来让它们保持相同的scale。 ``` # GRADED FUNCTION: backward_propagation_with_dropout def backward_propagation_with_dropout(X, Y, cache, keep_prob): """ Implements the backward propagation of our baseline model to which we added dropout. 
# GRADED FUNCTION: backward_propagation_with_dropout

def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Backward pass of the baseline 3-layer model trained with (inverted) dropout.

    The masks D1 and D2 drawn during the forward pass are re-applied here, and
    the surviving gradients are rescaled by 1/keep_prob, mirroring the
    forward-pass scaling.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob - probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter,
                 activation and pre-activation variables
    """
    num_examples = X.shape[1]
    (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # Output layer (sigmoid + cross-entropy).
    dZ3 = A3 - Y
    dW3 = 1. / num_examples * np.dot(dZ3, A2.T)
    db3 = 1. / num_examples * np.sum(dZ3, axis=1, keepdims=True)

    # Layer 2: re-apply mask D2, rescale, then back through the ReLU.
    dA2 = np.multiply(np.dot(W3.T, dZ3), D2) / keep_prob
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1. / num_examples * np.dot(dZ2, A1.T)
    db2 = 1. / num_examples * np.sum(dZ2, axis=1, keepdims=True)

    # Layer 1: re-apply mask D1, rescale, then back through the ReLU.
    dA1 = np.multiply(np.dot(W2.T, dZ2), D1) / keep_prob
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1. / num_examples * np.dot(dZ1, X.T)
    db1 = 1. / num_examples * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
]] </td> </tr> <tr> <td> **dA2** </td> <td> [[ 0.58180856 0. -0.00299679 0. -0.27715731] [ 0. 0.53159854 -0. 0.53159854 -0.34089673] [ 0. 0. -0.00292733 0. -0. ]] </td> </tr> </table> 让我们设置 (`keep_prob = 0.86`)来运行model(). 这意味着在每次迭代中我们要“关闭”在第一层和第二层中24%的神经元. 函数 `model()` 会使用: - `forward_propagation_with_dropout()` 代替 `forward_propagation()`. - `backward_propagation_with_dropout()` 代替 `backward_propagation()`. ``` parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) ``` 大家可以看到,测试准确率已经被提升至95%! 现在过拟合的问题已经被彻底解决。 现在来输出模型的决策边界。 ``` plt.title("Model with dropout") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` **小提示**: - 一个 **常见错误** 是许多人在训练和测试的过程都使用了dropout,但其实我们只需要在训练过程中使用dropout即可。 - 许多深度学习框架,比如[tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) 或者 [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) 都提供dropout的接口。 <font color='blue'> **对于dropout,你应该记住:** - Dropout是一个正则化方法。 - 你应该在训练过程中使用dropout,在测试过程中请不要使用dropout。 - 在前向传播和后向传播中都使用dropout。 - 将每个使用dropout的层除以 `keep_prob` 来保持输出值与原来的期望值一致。 **下面是三个模型的训练结果**: <table> <tr> <td> **model** </td> <td> **train accuracy** </td> <td> **test accuracy** </td> </tr> <td> 3-layer NN without regularization </td> <td> 95% </td> <td> 91.5% </td> <tr> <td> 3-layer NN with L2-regularization </td> <td> 94% </td> <td> 93% </td> </tr> <tr> <td> 3-layer NN with dropout </td> <td> 93% </td> <td> 95% </td> </tr> </table> 我们可以看到其实train accuracy在应用了L2正则化和Dropout后下降了,这是因为它们都限制了模型的过度拟合,但是鉴于最终的test accuracy得到了提升,所以它们还是对模型起到了优化的效果。 现在恭喜大家完成了本次实验。 <font color='blue'> 
**做完本实验大家应该记住:**: - 正则化能帮助我们减少过拟合的情况。 - 正则化会导致权重值减小。 - L2 regularization和Dropout是两种有效的对抗过拟合的方法。
github_jupyter
``` from google.colab import drive drive.mount('/content/drive') import torch.nn as nn import torch.nn.functional as F import pandas as pd import numpy as np import matplotlib.pyplot as plt import torch import torchvision import torchvision.transforms as transforms from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils from matplotlib import pyplot as plt import copy # Ignore warnings import warnings warnings.filterwarnings("ignore") transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True) testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') foreground_classes = {'plane', 'car', 'bird'} background_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'} fg1,fg2,fg3 = 0,1,2 dataiter = iter(trainloader) background_data=[] background_label=[] foreground_data=[] foreground_label=[] batch_size=10 for i in range(5000): images, labels = dataiter.next() for j in range(batch_size): if(classes[labels[j]] in background_classes): img = images[j].tolist() background_data.append(img) background_label.append(labels[j]) else: img = images[j].tolist() foreground_data.append(img) foreground_label.append(labels[j]) foreground_data = torch.tensor(foreground_data) foreground_label = torch.tensor(foreground_label) background_data = torch.tensor(background_data) background_label = torch.tensor(background_label) def create_mosaic_img(bg_idx,fg_idx,fg): """ bg_idx : list of indexes of background_data[] to be used as background images in mosaic fg_idx : index of image to be used as foreground 
def create_mosaic_img(bg_idx, fg_idx, fg):
    """
    Assemble one mosaic sample: a 3x3 grid of CIFAR tiles, 8 background + 1 foreground.

    bg_idx : list of indexes into background_data[] used as the 8 background tiles
    fg_idx : index into foreground_data of the image used as the foreground tile
    fg     : position (0-8) at which the foreground image is placed

    Returns (image_list, label) where image_list is a stacked (9, 3, 32, 32)
    double tensor and label is the foreground class remapped to 0-2.
    """
    tiles = []
    bg_pos = 0
    for pos in range(9):
        if pos == fg:
            tiles.append(foreground_data[fg_idx].type("torch.DoubleTensor"))
        else:
            tiles.append(background_data[bg_idx[bg_pos]].type("torch.DoubleTensor"))
            bg_pos += 1
    # Shift the raw CIFAR label so the three foreground classes become 0, 1, 2.
    label = foreground_label[fg_idx] - fg1
    return torch.stack(tiles), label
""" self.mosaic = mosaic_list_of_images self.label = mosaic_label self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx], self.fore_idx[idx] batch = 125 msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx) train_loader = DataLoader( msd,batch_size= batch ,shuffle=True) class Focus(nn.Module): def __init__(self): super(Focus, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0) self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0) self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0) self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) self.batch_norm1 = nn.BatchNorm2d(32) self.batch_norm2 = nn.BatchNorm2d(128) self.dropout1 = nn.Dropout2d(p=0.05) self.dropout2 = nn.Dropout2d(p=0.1) self.fc1 = nn.Linear(128,64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, 10) self.fc4 = nn.Linear(10, 2) def forward(self, x): x = self.conv1(x) x = F.relu(self.batch_norm1(x)) x = (F.relu(self.conv2(x))) x = self.pool(x) x = self.conv3(x) x = F.relu(self.batch_norm2(x)) x = (F.relu(self.conv4(x))) x = self.pool(x) x = self.dropout1(x) x = self.conv5(x) x = F.relu(self.batch_norm2(x)) x = (F.relu(self.conv6(x))) x = self.pool(x) x = x.view(x.size(0), -1) x = self.dropout2(x) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.dropout2(x) x = F.relu(self.fc3(x)) x = self.fc4(x) return x focus_net = Focus().double() focus_net = focus_net.to("cuda") focus_net.load_state_dict( torch.load("/content/drive/My Drive/Research/Cheating_data/Focus_net_weights/focus_net_6layer_cnn.pt")) print(focus_net.fc4) print(focus_net.fc4.weight) 
class Classification(nn.Module):
    """
    Mosaic classifier: the focus module scores each of the 9 tiles, the scores
    are softmaxed into attention weights (alphas), the tiles are averaged with
    those weights, and the averaged image is classified into 3 classes.

    Fixes over the original cell:
      * the batch size is taken from the input (z.size(0)) instead of the
        global `batch`, so a smaller final loader batch works too;
      * intermediate tensors are created with z's device/dtype instead of a
        hard-coded "cuda" / float64, so the module runs wherever z lives;
      * a dead `torch.mul(...)` statement whose result was discarded is removed.
    """

    def __init__(self, focus_net):
        super(Classification, self).__init__()
        self.module1 = focus_net  # attention ("focus") sub-network
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=0)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 10)
        self.fc4 = nn.Linear(10, 3)

    def forward(self, z):
        # z: batch of mosaics, shape (N, 9, 3, 32, 32)
        n = z.size(0)

        # One focus score per tile -> softmax -> attention weights over the 9 tiles.
        x = torch.zeros([n, 9], dtype=z.dtype, device=z.device)
        for i in range(9):
            x[:, i] = self.module1.forward(z[:, i])[:, 0]
        x = F.softmax(x, dim=1)

        # Attention-weighted average of the 9 tiles.
        y = torch.zeros([n, 3, 32, 32], dtype=z.dtype, device=z.device)
        for i in range(9):
            y = y + torch.mul(x[:, i][:, None, None, None], z[:, i])

        # Plain CNN classifier on the averaged image:
        # 32 -> 30 -> 15 -> 13 -> 6 -> 4, flatten 64*4*4 = 1024.
        y1 = self.pool(F.relu(self.conv1(y)))
        y1 = self.pool(F.relu(self.conv2(y1)))
        y1 = F.relu(self.conv3(y1))
        y1 = y1.view(y1.size(0), -1)
        y1 = F.relu(self.fc1(y1))
        y1 = F.relu(self.fc2(y1))
        y1 = F.relu(self.fc3(y1))
        y1 = self.fc4(y1)
        return y1, x, y
fore_idx_test.append(fg) image_list,label = create_mosaic_img(bg_idx,fg_idx,fg) test_images.append(image_list) test_label.append(label) test_data = MosaicDataset(test_images,test_label,fore_idx_test) test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False) correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in train_loader: inputs, labels , fore_idx = data inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda") outputs, alphas, avg_images = classify(inputs) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): count += 1 focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) 
print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) print(count) print("="*100) correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in test_loader: inputs, labels , fore_idx = data inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda") outputs, alphas, avg_images = classify(inputs) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> 
",argmax_less_than_half) correct = 0 total = 0 with torch.no_grad(): for data in train_loader: inputs, labels , fore_idx = data inputs, labels = inputs.to("cuda"), labels.to("cuda") outputs, alphas, avg_images = classify(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) correct = 0 total = 0 with torch.no_grad(): for data in test_loader: inputs, labels , fore_idx = data inputs, labels = inputs.to("cuda"), labels.to("cuda") outputs, alphas, avg_images = classify(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) ```
github_jupyter
#### Objective In this notebook I introduce a function to despike logs using rolling statistics to define what constitutes a spike, and what does not. I will apply the despiking to the P-wave velocity from one of the wells already used in the [Backus from dataframe notebook](https://github.com/mycarta/in-bruges/blob/master/notebooks/Backus_from_dataframe.ipynb).
Finally, it replaces the input with the median if their difference exceeds the mean difference plus a user defined number of standard deviations of the difference. Args: s = input curve (ndarray) w = long window length for the rolling median filter (integer) std = the number of standard deviations to use to flag spikes (integer) Returns: out (ndarray) = despiked curve """ m = pd.Series(s).rolling(window=w, center=True).median() flag = np.where(error_flag(pd.Series(s), m, dev = stds)==1) out = np.copy(s) out[flag] = m.values[flag] return out def rolling_despike(s, w1=75, w2=9, dev=4, dil=29): """ Despikes a curve using rolling statistics. First, it calculates the rolling median of the input curve on a long window. Next, it calculates the difference between the median and the input. Finally, it replaces the input with the median if their difference exceeds the mean difference plus a user defined number of standard deviations of the difference. Args: s = input curve (ndarray) w = long window length for the rolling median filter (integer) std = the number of standard deviations to use to flag spikes (integer) Returns: out (ndarray) = despiked curve """ s = pd.Series(s) mdn = s.rolling(window=w1, min_periods=1, center=True).apply(lambda x : np.nanmedian(x), 'raw=True') s_mdn = s.rolling(window=w2, min_periods=1, center=True).apply(lambda x : np.nanmedian(x), 'raw=True') mdn_mdn = mdn.rolling(window=w2, min_periods=1, center=True).apply(lambda x : np.nanmedian(x), 'raw=True') flag = np.where(error_flag(s_mdn, mdn_mdn, dev, dil)==1) out = np.copy(s) out[flag] = mdn.values[flag] return out def error_flag(pred, actual, dev, dil): """ Calculate the difference between a predicted and an actual curve and return a log flagging large differences based on a user-defined distance (in standard deviation units) from the mean difference Matteo Niccoli, October 2018 Args: predicted (ndarray) = predicted log actual (ndarray) = original log dev (float) = standard deviations to use, 
default 1 Returns: flag (ndarray) = error flag curve """ flag = np.zeros(len(pred)) err = np.abs(pred-actual) err_mean = np.mean(err) err_std = np.std(err) flag[np.where(err>(err_mean + (dev*err_std)))] = 1 flag=binary_dilation(flag, np.ones(dil))*1 return flag plt.figure(figsize=(22,6)) plt.plot(s, 'r', label='Original Vp') plt.plot(rolling_despike(s), 'k', label='Despiked Vp') plt.ylim(2800,6200) plt.xlim(0,4000) plt.legend(); ``` ## Compare to Welly despike ``` def rolling_window(s, window_length, func1d, step=1, return_rolled=False): """ Private function. Smoother for other smoothing/conditioning functions. Args: window_length (int): the window length. func1d (function): a function that takes a 1D array and returns a scalar. step (int): if you want to skip samples in the shifted versions. Don't use this for smoothing, you will get strange results. Returns: ndarray: the resulting array. """ # Force odd. if window_length % 2 == 0: window_length += 1 shape = s.shape[:-1] + (s.shape[-1], window_length) strides = s.strides + (step*s.strides[-1],) data = np.nan_to_num(s) data = np.pad(data, int(step*window_length//2), mode='edge') rolled = np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) result = np.apply_along_axis(func1d, -1, rolled) result[np.isnan(s)] = np.nan if return_rolled: return result, rolled else: return result def despike(s, window_length=45, samples=True, z=2): """ Args: window (int): window length in samples. Default 33 (or 5 m for most curves sampled at 0.1524 m intervals). samples (bool): window length is in samples. Use False for a window length given in metres. z (float): Z score Returns: Curve. 
""" window_length //= 1 if samples else s.step z *= np.nanstd(s) # Transform to curve's units curve_sm = rolling_window(s,window_length, np.median) spikes = np.where(np.nan_to_num(s - curve_sm) > z)[0] spukes = np.where(np.nan_to_num(curve_sm - s) > z)[0] out = np.copy(s) np.copy(s) out[spikes] = curve_sm[spikes] + z out[spukes] = curve_sm[spukes] - z return out plt.figure(figsize=(22,6)) plt.plot(s, 'r', label='Original Vp') plt.plot(despike(s), 'k', label='Despiked Vp') plt.ylim(2800,6200) plt.xlim(0,3000) plt.legend(); ```
github_jupyter
# Training Roboschool agents using distributed RL training across multiple nodes with Amazon SageMaker This notebook is an extension of `rl_roboschool_ray.ipynb` showcasing horizontal scaling of Reinforcement learning using Ray and TensorFlow. ## Pick which Roboschool problem to solve Roboschool is an [open source](https://github.com/openai/roboschool/tree/master/roboschool) physics simulator that is commonly used to train RL policies for robotic systems. Roboschool defines a [variety](https://github.com/openai/roboschool/blob/master/roboschool/__init__.py) of Gym environments that correspond to different robotics problems. Here we're highlighting a few of them at varying levels of difficulty: - **Reacher (easy)** - a very simple robot with just 2 joints reaches for a target - **Hopper (medium)** - a simple robot with one leg and a foot learns to hop down a track - **Humanoid (difficult)** - a complex 3D robot with two arms, two legs, etc. learns to balance without falling over and then to run on a track The simpler problems train faster with less computational resources. The more complex problems are more fun. ``` # Uncomment the problem to work on roboschool_problem = "reacher" # roboschool_problem = 'hopper' # roboschool_problem = 'humanoid' ``` ## Pre-requisites ### Imports To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations. 
``` import sagemaker import boto3 import sys import os import glob import re import subprocess from IPython.display import HTML, Markdown import time from time import gmtime, strftime sys.path.append("common") from misc import get_execution_role, wait_for_s3_object from docker_utils import build_and_push_docker_image from sagemaker.rl import RLEstimator, RLToolkit, RLFramework from markdown_helper import generate_help_for_s3_endpoint_permissions, create_s3_endpoint_manually ``` ### Setup S3 bucket Set up the linkage and authentication to the S3 bucket that you want to use for checkpoint and the metadata. ``` sage_session = sagemaker.session.Session() s3_bucket = sage_session.default_bucket() s3_output_path = "s3://{}/".format(s3_bucket) print("S3 bucket path: {}".format(s3_output_path)) ``` ### Define Variables We define variables such as the job prefix for the training jobs *and the image path for the container (only when this is BYOC).* ``` # create a descriptive job name job_name_prefix = "rl-roboschool-distributed-" + roboschool_problem aws_region = boto3.Session().region_name ``` ### Configure where training happens You can train your RL training jobs using the SageMaker notebook instance or local notebook instance. In both of these scenarios, you can run the following in either local or SageMaker modes. The local mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`. ``` # run in local_mode on this machine, or as a SageMaker TrainingJob? 
local_mode = False if local_mode: instance_type = "local" else: # If on SageMaker, pick the instance type instance_type = "ml.c5.2xlarge" train_instance_count = 3 ``` ### Create an IAM role Either get the execution role when running from a SageMaker notebook instance `role = sagemaker.get_execution_role()` or, when running from local notebook instance, use utils method `role = get_execution_role()` to create an execution role. ``` try: role = sagemaker.get_execution_role() except: role = get_execution_role() print("Using IAM role arn: {}".format(role)) ``` ### Install docker for `local` mode In order to work in `local` mode, you need to have docker installed. When running from you local machine, please make sure that you have docker and docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script to install dependenceis. Note, you can only run a single local notebook at one time. ``` # only run from SageMaker notebook instance if local_mode: !/bin/bash ./common/setup.sh ``` ## Build docker container We must build a custom docker container with Roboschool installed. This takes care of everything: 1. Fetching base container image 2. Installing Roboschool and its dependencies 3. Uploading the new container image to ECR This step can take a long time if you are running on a machine with a slow internet connection. If your notebook instance is in SageMaker or EC2 it should take 3-10 minutes depending on the instance type. 
``` %%time cpu_or_gpu = "gpu" if instance_type.startswith("ml.p") else "cpu" repository_short_name = "sagemaker-roboschool-ray-%s" % cpu_or_gpu docker_build_args = { "CPU_OR_GPU": cpu_or_gpu, "AWS_REGION": boto3.Session().region_name, } custom_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using ECR image %s" % custom_image_name) ``` ## Write the Training Code The training code is in a series of The training code is written in the file “train-{roboschool_problem}.py” which is uploaded in the /src directory. First import the environment files and the preset files, and then define the main() function. ``` !pygmentize src/train-{roboschool_problem}.py ``` ## Ray homogeneous scaling - Specify train_instance_count > 1 Homogeneous scaling allows us to use multiple instances of the same type. ``` metric_definitions = RLEstimator.default_metric_definitions(RLToolkit.RAY) estimator = RLEstimator( entry_point="train-%s.py" % roboschool_problem, source_dir="src", dependencies=["common/sagemaker_rl"], image_uri=custom_image_name, role=role, instance_type=instance_type, instance_count=train_instance_count, output_path=s3_output_path, base_job_name=job_name_prefix, metric_definitions=metric_definitions, hyperparameters={ # Attention scientists! You can override any Ray algorithm parameter here: # 3 m4.2xl with 8 cores each. We have to leave 1 core for ray scheduler. # Don't forget to change this on the basis of instance type. "rl.training.config.num_workers": (8 * train_instance_count) - 1 # "rl.training.config.horizon": 5000, # "rl.training.config.num_sgd_iter": 10, }, ) estimator.fit(wait=local_mode) job_name = estimator.latest_training_job.job_name print("Training job: %s" % job_name) ``` ### Visualization RL training can take a long time. So while it's running there are a variety of ways we can track progress of the running training job. 
Some intermediate output gets saved to S3 during training, so we'll set up to capture that. ``` print("Job name: {}".format(job_name)) s3_url = "s3://{}/{}".format(s3_bucket, job_name) if local_mode: output_tar_key = "{}/output.tar.gz".format(job_name) else: output_tar_key = "{}/output/output.tar.gz".format(job_name) intermediate_folder_key = "{}/output/intermediate/".format(job_name) output_url = "s3://{}/{}".format(s3_bucket, output_tar_key) intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key) print("S3 job path: {}".format(s3_url)) print("Output.tar.gz location: {}".format(output_url)) print("Intermediate folder path: {}".format(intermediate_url)) tmp_dir = "/tmp/{}".format(job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder {}".format(tmp_dir)) ``` ### Fetch videos of training rollouts Videos of certain rollouts get written to S3 during training. Here we fetch the last 10 videos from S3, and render the last one. ``` recent_videos = wait_for_s3_object( s3_bucket, intermediate_folder_key, tmp_dir, fetch_only=(lambda obj: obj.key.endswith(".mp4") and obj.size > 0), limit=10, ) last_video = sorted(recent_videos)[-1] # Pick which video to watch os.system("mkdir -p ./src/tmp_render/ && cp {} ./src/tmp_render/last_video.mp4".format(last_video)) HTML('<video src="./src/tmp_render/last_video.mp4" controls autoplay></video>') ``` ### Plot metrics for training job We can see the reward metric of the training as it's running, using algorithm metrics that are recorded in CloudWatch metrics. We can plot this to see the performance of the model over time. 
``` %matplotlib inline from sagemaker.analytics import TrainingJobAnalytics df = TrainingJobAnalytics(job_name, ["episode_reward_mean"]).dataframe() num_metrics = len(df) if num_metrics == 0: print("No algorithm metrics found in CloudWatch") else: plt = df.plot(x="timestamp", y="value", figsize=(12, 5), legend=True, style="b-") plt.set_ylabel("Mean reward per episode") plt.set_xlabel("Training time (s)") ``` ### Monitor training progress You can repeatedly run the visualization cells to get the latest videos or see the latest metrics as the training job proceeds. ## Ray heterogeneous scaling To scale out RL training, we can increase the number of rollout workers. However, with more rollouts, training can often become the bottleneck. To prevent this, we can use an instance with one or more GPUs for training, and multiple CPU instances for rollouts. Since SageMaker supports a single type of instance in a training job, we can achieve the above by spinning two SageMaker jobs and letting them communicate with each other. For the sake of naming, we'll use `Primary cluster` to refer to 1 or more GPU instances, and `Secondary cluster` to refer to the cluster of CPU instances. > Please note that local_mode cannot be used for testing this type of scaling. Before we configure the SageMaker job, let us first ensure that we run SageMaker in VPC mode. VPC mode will allow the two SageMaker jobs to communicate over network. This can be done by supplying subnets and security groups to the job launching scripts. We will use the default VPC configuration for this example. 
``` ec2 = boto3.client("ec2") default_vpc = [vpc["VpcId"] for vpc in ec2.describe_vpcs()["Vpcs"] if vpc["IsDefault"] == True][0] default_security_groups = [ group["GroupId"] for group in ec2.describe_security_groups()["SecurityGroups"] if group["GroupName"] == "default" and group["VpcId"] == default_vpc ] default_subnets = [ subnet["SubnetId"] for subnet in ec2.describe_subnets()["Subnets"] if subnet["VpcId"] == default_vpc and subnet["DefaultForAz"] == True ] print("Using default VPC:", default_vpc) print("Using default security group:", default_security_groups) print("Using default subnets:", default_subnets) ``` A SageMaker job running in VPC mode cannot access S3 resources. So, we need to create a VPC S3 endpoint to allow S3 access from SageMaker container. To learn more about the VPC mode, please visit [this link.](https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html) ``` try: route_tables = [ route_table["RouteTableId"] for route_table in ec2.describe_route_tables()["RouteTables"] if route_table["VpcId"] == default_vpc ] except Exception as e: if "UnauthorizedOperation" in str(e): display(Markdown(generate_help_for_s3_endpoint_permissions(role))) else: display(Markdown(create_s3_endpoint_manually(aws_region, default_vpc))) raise e print("Trying to attach S3 endpoints to the following route tables:", route_tables) assert len(route_tables) >= 1, ( "No route tables were found. Please follow the VPC S3 endpoint creation " "guide by clicking the above link." 
) try: ec2.create_vpc_endpoint( DryRun=False, VpcEndpointType="Gateway", VpcId=default_vpc, ServiceName="com.amazonaws.{}.s3".format(aws_region), RouteTableIds=route_tables, ) print("S3 endpoint created successfully!") except Exception as e: if "RouteAlreadyExists" in str(e): print("S3 endpoint already exists.") elif "UnauthorizedOperation" in str(e): display(Markdown(generate_help_for_s3_endpoint_permissions(role))) raise e else: display(Markdown(create_s3_endpoint_manually(aws_region, default_vpc))) raise e ``` ### Configure instance types Let us configure a cluster with 1 Volta (V100) GPU and 40 CPU cores. We can do this by using 1 ml.p3.2xlarge instance and 2 ml.c5.4xlarge instances, since ml.p3.2xlarge has 8 CPU cores and ml.c5.4xlarge has 16 CPU cores. ``` %%time # Build CPU image cpu_repository_short_name = "sagemaker-roboschool-ray-%s" % "cpu" docker_build_args = { "CPU_OR_GPU": "cpu", "AWS_REGION": boto3.Session().region_name, } cpu_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using CPU ECR image %s" % cpu_image_name) # Build GPU image gpu_repository_short_name = "sagemaker-roboschool-ray-%s" % "gpu" docker_build_args = { "CPU_OR_GPU": "gpu", "AWS_REGION": boto3.Session().region_name, } gpu_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using GPU ECR image %s" % gpu_image_name) primary_cluster_instance_type = "ml.p3.2xlarge" primary_cluster_instance_count = 1 secondary_cluster_instance_type = "ml.c5.4xlarge" secondary_cluster_instance_count = 2 total_cpus = 40 - 1 # Leave one for ray scheduler total_gpus = 1 ``` Next, we choose the roboschool agent that we want to train. 
For heterogeneous training, we also pass some additional parameters to the training job that aid in synchronization across instances: - s3_bucket, s3_prefix: Used for storing metadata like master IP address - rl_cluster_type: "primary" or "secondary" - aws_region: This is required for making connection to S3 in VPC mode - rl_num_instances_secondary: Number of nodes in secondary cluster - subnets, security_group_ids: Required by VPC mode ``` roboschool_problem = "reacher" job_name_prefix = "rl-roboschool-distributed-" + roboschool_problem s3_output_path = "s3://{}/".format(s3_bucket) # SDK appends the job name and output folder # We explicitly need to specify these params so that the two jobs can synchronize using the metadata stored here s3_bucket = sage_session.default_bucket() s3_prefix = "dist-ray-%s-1GPU-40CPUs" % (roboschool_problem) # Make sure that the prefix is empty !aws s3 rm --recursive s3://{s3_bucket}/{s3_prefix} ``` ### Launch primary cluster (1 GPU training instance) ``` primary_cluster_estimator = RLEstimator( entry_point="train-%s.py" % roboschool_problem, source_dir="src", dependencies=["common/sagemaker_rl"], image_uri=gpu_image_name, role=role, instance_type=primary_cluster_instance_type, instance_count=primary_cluster_instance_count, output_path=s3_output_path, base_job_name=job_name_prefix + "-primary", metric_definitions=metric_definitions, max_run=int(3600 * 0.5), # Maximum runtime in seconds hyperparameters={ "s3_prefix": s3_prefix, # Important for syncing "s3_bucket": s3_bucket, # Important for syncing "aws_region": boto3.Session().region_name, # Important for S3 connection "rl_cluster_type": "primary", # Important for syncing "rl_num_instances_secondary": secondary_cluster_instance_count, # Important for syncing "rl.training.config.num_workers": total_cpus, "rl.training.config.train_batch_size": 20000, "rl.training.config.num_gpus": total_gpus, }, subnets=default_subnets, # Required for VPC mode security_group_ids=default_security_groups, 
# Required for VPC mode ) primary_cluster_estimator.fit(wait=False) primary_job_name = primary_cluster_estimator.latest_training_job.job_name print("Primary Training job: %s" % primary_job_name) ``` ### Launch secondary cluster (2 CPU instances) ``` secondary_cluster_estimator = RLEstimator( entry_point="train-%s.py" % roboschool_problem, source_dir="src", dependencies=["common/sagemaker_rl"], image_uri=cpu_image_name, role=role, instance_type=secondary_cluster_instance_type, instance_count=secondary_cluster_instance_count, output_path=s3_output_path, base_job_name=job_name_prefix + "-secondary", metric_definitions=metric_definitions, max_run=3600, # Maximum runtime in seconds hyperparameters={ "s3_prefix": s3_prefix, # Important for syncing "s3_bucket": s3_bucket, # Important for syncing "aws_region": boto3.Session().region_name, # Important for S3 connection "rl_cluster_type": "secondary", # Important for syncing }, subnets=default_subnets, # Required for VPC mode security_group_ids=default_security_groups, # Required for VPC mode ) secondary_cluster_estimator.fit(wait=False) secondary_job_name = secondary_cluster_estimator.latest_training_job.job_name print("Secondary Training job: %s" % secondary_job_name) ``` ### Visualization ``` print("Job name: {}".format(primary_job_name)) s3_url = "s3://{}/{}".format(s3_bucket, primary_job_name) if local_mode: output_tar_key = "{}/output.tar.gz".format(primary_job_name) else: output_tar_key = "{}/output/output.tar.gz".format(primary_job_name) intermediate_folder_key = "{}/output/intermediate/".format(primary_job_name) output_url = "s3://{}/{}".format(s3_bucket, output_tar_key) intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key) print("S3 job path: {}".format(s3_url)) print("Output.tar.gz location: {}".format(output_url)) print("Intermediate folder path: {}".format(intermediate_url)) tmp_dir = "/tmp/{}".format(primary_job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder 
{}".format(tmp_dir)) ``` ### Fetch videos of training rollouts Videos of certain rollouts get written to S3 during training. Here we fetch the last 10 videos from S3, and render the last one. ``` recent_videos = wait_for_s3_object( s3_bucket, intermediate_folder_key, tmp_dir, fetch_only=(lambda obj: obj.key.endswith(".mp4") and obj.size > 0), limit=10, ) last_video = sorted(recent_videos)[-1] # Pick which video to watch os.system("mkdir -p ./src/tmp_render/ && cp {} ./src/tmp_render/last_video.mp4".format(last_video)) HTML('<video src="./src/tmp_render/last_video.mp4" controls autoplay></video>') ``` ### Plot metrics for training job We can see the reward metric of the training as it's running, using algorithm metrics that are recorded in CloudWatch metrics. We can plot this to see the performance of the model over time. ``` %matplotlib inline from sagemaker.analytics import TrainingJobAnalytics df = TrainingJobAnalytics(primary_job_name, ["episode_reward_mean"]).dataframe() num_metrics = len(df) if num_metrics == 0: print("No algorithm metrics found in CloudWatch") else: plt = df.plot(x="timestamp", y="value", figsize=(12, 5), legend=True, style="b-") plt.set_ylabel("Mean reward per episode") plt.set_xlabel("Training time (s)") ``` And that's it! You can repeatedly run the visualization cells to get the latest videos or see the latest metrics as the training job proceeds.
github_jupyter
# Get patients image size and mask boundings into CSV * Each script uses only a single GPU, so we will distribute patients among shards in order to distribute or paralellize execution * For each shard, duplicate this script, set a unique SHARD_ID and execute it * This script: * Creates a directory named "patients-[shard_id]" with all its results * Creates a file "patients-analysis.csv" with all imaging analysis data * Snapshots 3 slides for each patient to directory "samples" ``` #only (patient_id%NR_SHARDS) == SHARD_ID will be processed here #choose a value between 1-4 SHARD_ID = 1 NR_SHARDS = 4 #Patient DICOM images folder INPUT_FOLDER = '../../input/sample_images/' OUTPUT_FOLDER = '../../output/' + str(SHARD_ID) + '/' %matplotlib inline import numpy as np # linear algebra from numpy import ndarray import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import statistics import csv import dicom from time import time import os import shutil import scipy.ndimage import matplotlib.pyplot as plt import matplotlib.patches as patches import scipy.ndimage as ndimage import itertools from itertools import product, combinations from skimage import measure, morphology from mpl_toolkits.mplot3d.art3d import Poly3DCollection class Timer: def __init__(self, name, debug=True): self._name = name self._debug = debug self.start() def start(self): self._start = time() if(self._debug): log('> [started] ' + self._name + '...') def stop(self): self._lastElapsed = (time()-self._start) if(self._debug): log('> [done] {} ({:.3f} ms)'.format(self._name, self._lastElapsed*1000)) def elapsed(self): if(self._lastElapsed != None): return (self._lastElapsed) else: return (time()-self._start) import datetime def log(message): print('{} {}'.format(datetime.datetime.now(), message)) def get_patient_ids(shard_id, input_folder): shard_patients = [] patients = os.listdir(input_folder) patients.sort() for p in patients: if(int(p,16)%NR_SHARDS == (shard_id-1)): shard_patients.append(p) 
return shard_patients # Load the scans in given folder path #image pixels dimensions: z, y, x def load_scan(path): t = Timer('load_scan ' + path) slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)] slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) try: slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2]) except: slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation) for s in slices: s.slice_thickness = slice_thickness t.stop() return slices #image pixels dimensions: z, y, x def get_pixels_hu(slices): image = np.stack([s.pixel_array for s in slices]) # Convert to int16 (from sometimes int16), # should be possible as values should always be low enough (<32k) image = image.astype(np.int16) # Set outside-of-scan pixels to 0 # The intercept is usually -1024, so air is approximately 0 image[image == -2000] = 0 # Convert to Hounsfield units (HU) for slice_number in range(len(slices)): intercept = slices[slice_number].RescaleIntercept slope = slices[slice_number].RescaleSlope if slope != 1: image[slice_number] = slope * image[slice_number].astype(np.float64) image[slice_number] = image[slice_number].astype(np.int16) image[slice_number] += np.int16(intercept) return np.array(image, dtype=np.int16) #image pixels dimensions: z, y, x def resample(image, scan, new_spacing=[1,1,1]): t = Timer('resample') # Determine current pixel spacing spacing = np.array([scan[0].slice_thickness] + scan[0].PixelSpacing, dtype=np.float32) resize_factor = spacing / new_spacing new_real_shape = image.shape * resize_factor new_shape = np.round(new_real_shape) real_resize_factor = new_shape / image.shape new_spacing = spacing / real_resize_factor image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') t.stop() return image, new_spacing def largest_label_volume(im, bg=-1): vals, counts = np.unique(im, return_counts=True) counts = counts[vals != bg] vals = vals[vals != bg] if 
len(counts) > 0: return vals[np.argmax(counts)] else: return None def segment_lung_mask(image, fill_lung_structures=True): t = Timer('segment_lung_mask') # 0 is treated as background, which we do not want binary_image = np.array(image > -320, dtype=np.int8)+1 labels = measure.label(binary_image) # Pick the pixel in the very corner to determine which label is air. # Improvement: Pick multiple background labels from around the patient # More resistant to "trays" on which the patient lays cutting the air # around the person in half background_label = labels[0,0,0] #Fill the air around the person binary_image[background_label == labels] = 2 # Method of filling the lung structures (that is superior to something like # morphological closing) if fill_lung_structures: # For every slice we determine the largest solid structure for i, axial_slice in enumerate(binary_image): axial_slice = axial_slice - 1 labeling = measure.label(axial_slice) l_max = largest_label_volume(labeling, bg=0) if l_max is not None: #This slice contains some lung binary_image[i][labeling != l_max] = 1 binary_image -= 1 #Make the image actual binary binary_image = 1-binary_image # Invert it, lungs are now 1 # Remove other air pockets insided body labels = measure.label(binary_image, background=0) l_max = largest_label_volume(labels, bg=0) if l_max is not None: # There are air pockets binary_image[labels != l_max] = 0 #dilate mask binary_image = scipy.ndimage.morphology.grey_dilation(binary_image, size=(10,10,10)) t.stop() return binary_image #returns ((x1, y1, z1), (x2, y2, z2)) def bounding_box(img): N = img.ndim out = [] for ax in itertools.combinations(range(N), N - 1): nonzero = np.any(img, axis=ax) out.extend(np.where(nonzero)[0][[0, -1]]) r = np.reshape(np.asarray(tuple(out)), (-1, 2)).T return [tuple(r[0]), tuple(r[1])] #return bounding box center in (x,y,z) def bounding_box_center(bounds): return (int(round((bounds[0][0] + (bounds[1][0]-bounds[0][0])/2))), int(round((bounds[0][1] + 
(bounds[1][1]-bounds[0][1])/2))), int(round((bounds[0][2] + (bounds[1][2]-bounds[0][2])/2)))) def generate_slice_shot(patient_pixels, patient_lung_mask, patient_id, slice_pos, output_dir): t = Timer('generate_slice_shot ' + str(slice_pos)) fig1, ax1 = plt.subplots(1) fig1.set_size_inches(6,6) masked_img = np.ma.masked_where(patient_lung_mask[slice_pos]==0, patient_pixels[slice_pos]) ax1.imshow(masked_img, cmap=plt.cm.gray) file = output_dir + patient_id + '-' + 'slice-' + str(slice_pos) + '.jpg' plt.savefig(file) plt.close(fig1) # plt.show() t.stop() def generate_patient_info(patient_pixels, patient_lung_mask, patient_scan, patient_id, append_to_csv_file): t = Timer('generate_patient_info') info = [] #patient_id info.append(patient_id) #image w,h,d info.append(np.shape(patient_pixels)[2]) info.append(np.shape(patient_pixels)[1]) info.append(np.shape(patient_pixels)[0]) #image volume mean t1 = Timer('flatten pixels') data = list(ndarray.flatten(patient_pixels)) t1.stop() t1 = Timer('calc mean') info.append(statistics.mean(data)) t1.stop() #slice original scan qtty,thickness info.append(len(patient_scan)) info.append(patient_scan[0].slice_thickness) #mask cx,cy,cz,w,h,d box = bounding_box(patient_lung_mask) box_center = bounding_box_center(box) info.append(box_center[0]) info.append(box_center[1]) info.append(box_center[2]) info.append(box[1][0]-box[0][0]) info.append(box[1][1]-box[0][1]) info.append(box[1][2]-box[0][2]) #append data to csv file with open(append_to_csv_file, 'a') as csvfile: writer = csv.writer(csvfile, delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL) writer.writerow(info) t.stop() def process_patient(input_dir, patient_id, output_shots_dir, output_csv_file): patient_dir = input_dir + patient_id patient_scan = load_scan(patient_dir) patient_pixels = get_pixels_hu(patient_scan) patient_pixels_resampled, spacing = resample(patient_pixels, patient_scan, [1,1,1]) patient_lung_mask = segment_lung_mask(patient_pixels_resampled, True) 
generate_patient_info(patient_pixels_resampled, patient_lung_mask, patient_scan, patient_id, output_csv_file) ln = np.shape(patient_pixels_resampled)[0] generate_slice_shot(patient_pixels_resampled, patient_lung_mask, patient_id, int(ln/4), output_shots_dir) generate_slice_shot(patient_pixels_resampled, patient_lung_mask, patient_id, int(ln/4*2), output_shots_dir) generate_slice_shot(patient_pixels_resampled, patient_lung_mask, patient_id, int(ln/4*3), output_shots_dir) def start_processing(input_dir, shard_id, max_patients, output_dir): log('Processing patients. shard_id=' + str(shard_id) + ' max_patients='+ str(max_patients) + ' input_dir=' + input_dir + ' output_dir=' + output_dir) patient_ids = get_patient_ids(shard_id, input_dir) log('Number of patients: ' + str(len(patient_ids))) patients_count = 0 shutil.rmtree(output_dir, True) try: os.makedirs(output_dir) except: pass for patient_id in patient_ids: patients_count = patients_count + 1 if(patients_count>max_patients): break t = Timer('>>> PATIENT PROCESSING ' + patient_id + ' (count=' + str(patients_count) + '; output_dir=' + output_dir + ')') process_patient(input_dir, patient_id, output_dir + 'shots/', output_dir + 'patients.csv') t.stop() print('') start_processing(INPUT_FOLDER, SHARD_ID, 2, OUTPUT_FOLDER) ```
github_jupyter
# Sharp edges in Differentiable Swift Differentiable Swift has come a long way in terms of usability. Here is a heads-up about the parts that are still a little un-obvious. As progress continues, this guide will become smaller and smaller, and you'll be able to write differentiable code without needing special syntax. ##Loops Loops are differentiable, there's just one detail to know about. When you write the loop, wrap the bit where you specify what you're looping over in `withoutDerivative(at:)` ``` var a: [Float] = [1,2,3] ``` for example: ``` for _ in a.indices {} ``` becomes ``` for _ in withoutDerivative(at: a.indices) {} ``` or: ``` for _ in 0..<a.count {} ``` becomes ``` for _ in 0..<withoutDerivative(at: a.count) {} ``` This is necessary because the `Array.count` member doesn't contribute to the derivative with respect to the array. Only the actual elements in the array contribute to the derivative. If you've got a loop where you manually use an integer as the upper bound, there's no need to use `withoutDerivative(at:)`: ``` let iterations: Int = 10 for _ in 0..<iterations {} //this is fine as-is. 
``` ##Map and Reduce `map` and `reduce` have special differentiable versions that work exactly like what you're used to: ``` a = [1,2,3] let aPlusOne = a.differentiableMap {$0 + 1} let aSum = a.differentiableReduce(0, +) print("aPlusOne", aPlusOne) print("aSum", aSum) ``` ##Array subscript sets Array subscript sets (`array[0] = 0`) aren't differentiable out of the box, but you can paste this extension: ``` extension Array where Element: Differentiable { @differentiable(where Element: Differentiable) mutating func updated(at index: Int, with newValue: Element) { self[index] = newValue } @derivative(of: updated) mutating func vjpUpdated(at index: Int, with newValue: Element) -> (value: Void, pullback: (inout TangentVector) -> (Element.TangentVector)) { self.updated(at: index, with: newValue) return ((), { v in let dElement = v[index] v.base[index] = .zero return dElement }) } } ``` and then the workaround syntax is like this: ``` var b: [Float] = [1,2,3] ``` instead of this: ``` b[0] = 17 ``` write this: ``` b.updated(at: 0, with: 17) ``` Let's make sure it works: ``` func plusOne(array: [Float]) -> Float{ var array = array array.updated(at: 0, with: array[0] + 1) return array[0] } let plusOneValAndGrad = valueWithGradient(at: [2], in: plusOne) print(plusOneValAndGrad) ``` The error you'll get without this workaround is `Differentiation of coroutine calls is not yet supported`. Here is the link to see progress on making this workaround unnecessary: https://bugs.swift.org/browse/TF-1277 (it talks about Array.subscript._modify, which is what's called behind the scenes when you do an array subscript set). ##`Float` <-> `Double` conversions If you're switching between `Float` and `Double`, their constructors aren't already differentiable. Here's a function that will let you go from a `Float` to a `Double` differentiably. (Switch `Float` and `Double` in the below code, and you've got a function that converts from `Double` to `Float`.) 
You can make similar converters for any other real Numeric types. ``` @differentiable func convertToDouble(_ a: Float) -> Double { return Double(a) } @derivative(of: convertToDouble) func convertToDoubleVJP(_ a: Float) -> (value: Double, pullback: (Double) -> Float) { func pullback(_ v: Double) -> Float{ return Float(v) } return (value: Double(a), pullback: pullback) } ``` Here's an example usage: ``` @differentiable func timesTwo(a: Float) -> Double { return convertToDouble(a * 2) } let input: Float = 3 let valAndGrad = valueWithGradient(at: input, in: timesTwo) print("grad", valAndGrad.gradient) print("type of input:", type(of: input)) print("type of output:", type(of: valAndGrad.value)) print("type of gradient:", type(of: valAndGrad.gradient)) ``` ##Transcendental and other functions (sin, cos, abs, max) A lot of transcendentals and other common built-in functions have already been made differentiable for `Float` and `Double`. There are fewer for `Double` than `Float`. Some aren't available for either. 
So here are a few manual derivative definitions to give you the idea of how to make what you need, in case it isn't already provided: pow (see [link](https://www.wolframalpha.com/input/?i=partial+derivatives+of+f%28x%2Cy%29+%3D+x%5Ey) for derivative explanation) ``` import Foundation @usableFromInline @derivative(of: pow) func powVJP(_ base: Double, _ exponent: Double) -> (value: Double, pullback: (Double) -> (Double, Double)) { let output: Double = pow(base, exponent) func pullback(_ vector: Double) -> (Double, Double) { let baseDerivative = vector * (exponent * pow(base, exponent - 1)) let exponentDerivative = vector * output * log(base) return (baseDerivative, exponentDerivative) } return (value: output, pullback: pullback) } ``` max ``` @usableFromInline @derivative(of: max) func maxVJP<T: Comparable & Differentiable>(_ x: T, _ y: T) -> (value: T, pullback: (T.TangentVector) -> (T.TangentVector, T.TangentVector)) { func pullback(_ v: T.TangentVector) -> (T.TangentVector, T.TangentVector) { if x < y { return (.zero, v) } else { return (v, .zero) } } return (value: max(x, y), pullback: pullback) } ``` abs ``` @usableFromInline @derivative(of: abs) func absVJP<T: Comparable & SignedNumeric & Differentiable>(_ x: T) -> (value: T, pullback: (T.TangentVector) -> T.TangentVector) { func pullback(_ v: T.TangentVector) -> T.TangentVector{ if x < 0 { return .zero - v } else { return v } } return (value: abs(x), pullback: pullback) } ``` sqrt (see [link](https://www.wolframalpha.com/input/?i=partial+derivative+of+f%28x%29+%3D+sqrt%28x%29) for derivative explanation) ``` @usableFromInline @derivative(of: sqrt) func sqrtVJP(_ x: Double) -> (value: Double, pullback: (Double) -> Double) { let output = sqrt(x) func pullback(_ v: Double) -> Double { return v / (2 * output) } return (value: output, pullback: pullback) } ``` Let's check that these work: ``` let powGrad = gradient(at: 2, 2, in: pow) print("pow gradient: ", powGrad, "which is", powGrad == (4.0, 2.772588722239781) ? 
"correct" : "incorrect") let maxGrad = gradient(at: 1, 2, in: max) print("max gradient: ", maxGrad, "which is", maxGrad == (0.0, 1.0) ? "correct" : "incorrect") let absGrad = gradient(at: 2, in: abs) print("abs gradient: ", absGrad, "which is", absGrad == 1.0 ? "correct" : "incorrect") let sqrtGrad = gradient(at: 4, in: sqrt) print("sqrt gradient: ", sqrtGrad, "which is", sqrtGrad == 0.25 ? "correct" : "incorrect") ``` The compiler error that alerts you to the need for something like this is: `Expression is not differentiable. Cannot differentiate functions that have not been marked '@differentiable' and that are defined in other files` ##`KeyPath` subscripting `KeyPath` subscripting (get or set) doesn't work out of the box, but once again, there are some extensions you can add, and then use a workaround syntax. Here it is: https://github.com/tensorflow/swift/issues/530#issuecomment-687400701 This workaround is a little uglier than the others. It only works for custom objects, which must conform to Differentiable and AdditiveArithmetic. You have to add a `.tmp` member and a `.read()` function, and you use the `.tmp` member as intermediate storage when doing `KeyPath` subscript gets (there is an example in the linked code). `KeyPath` subscript sets work pretty simply with a `.write()` function.
github_jupyter
## About how to train your own dataset It's optional to use lmdb format or ordinary format. ### Ordinary format Please copy the code of '/data/BSD/py' to create your dataset file '[dataset].py' and modify '\_generate_samples()' function according to your directory structure. ### Lmdb format Here, we take GOPRO-DS as example. First, download and unzip the source dataset ["*gopro_ds*"](https://drive.google.com/file/d/1oICQVSIrDmaMB6R888uyGXRmWcEEQVvy/view?usp=sharing). Then, run the following code to create lmdb file (i.e., generate 'gopro_ds_lmdb' for 'gopro_ds'): ``` import os import numpy as np import matplotlib.pyplot as plt import cv2 import lmdb import pickle from os.path import join dataset_name = 'gopro_ds' # replace with the directory name of your dataset data_root = '/home/zhong/Dataset/' # replace with your own path data_path = join(data_root, dataset_name) lmdb_path = join(data_root, dataset_name+'_lmdb') os.makedirs(lmdb_path, exist_ok=True) for dataset_type in ['train', 'valid']: # create meta-info pkl files for the dataset path = join(data_path, dataset_type) seqs = os.listdir(path) seqs_info = {} length = 0 for i in range(len(seqs)): seq_info = {} seq_info['seq'] = seqs[i] length_temp = len(os.listdir(join(path,seqs[i],'blur_gamma'))) seq_info['length'] = length_temp length += length_temp seqs_info[i] = seq_info seqs_info['length'] = length seqs_info['num'] = len(seqs) f = open(join(lmdb_path,'{}_info_{}.pkl'.format(dataset_name, dataset_type)), 'wb') pickle.dump(seqs_info, f) f.close() for dataset_label in [dataset_type, '{}_gt'.format(dataset_type)]: for i in range(seqs_info['num']): env = lmdb.open(join(lmdb_path, '{}_{}'.format(dataset_name, dataset_label)), map_size=1099511627776) txn = env.begin(write=True) if dataset_label.endswith('gt'): subpath = join(path, seqs_info[i]['seq'], 'sharp') else: subpath = join(path, seqs_info[i]['seq'], 'blur_gamma') imgs = os.listdir(subpath) nums = [int(img.split('.')[0]) for img in imgs] # make sure your 
images are named by numbers, e.g., 0001.png. nums.sort() gap = nums[0]-0 for img in imgs: img_path = join(subpath, img) seq_idx = i frame_idx = int(img.split('.')[0])-gap key = '%03d_%08d' % (seq_idx, frame_idx) data = cv2.imread(img_path) txn.put(key=key.encode(), value=data) txn.commit() env.close() ``` Then, use the following code to check if the lmdb file is valid. ``` H,W,C = 540,960,3 # tarin set env = lmdb.open(join(lmdb_path, '{}_train'.format(dataset_name)), map_size=1099511627776) env_gt = lmdb.open(join(lmdb_path, '{}_train_gt'.format(dataset_name)), map_size=1099511627776) txn = env.begin() txn_gt = env_gt.begin() seq = 21 frame = 39 key = '{:03d}_{:08d}'.format(seq, frame) test = txn.get(key.encode()) test = np.frombuffer(test, dtype='uint8') test = test.reshape(H,W,C) test_gt = txn_gt.get(key.encode()) test_gt = np.frombuffer(test_gt, dtype='uint8') test_gt = test_gt.reshape(H,W,C) plt.imshow(test[:,:,::-1]) plt.figure() plt.imshow(test_gt[:,:,::-1]) plt.show() env.close() env_gt.close() # valid set env = lmdb.open(join(lmdb_path, '{}_valid'.format(dataset_name)), map_size=1099511627776) env_gt = lmdb.open(join(lmdb_path, '{}_valid_gt'.format(dataset_name)), map_size=1099511627776) txn = env.begin() txn_gt = env_gt.begin() seq = 8 frame = 39 key = '{:03d}_{:08d}'.format(seq, frame) test = txn.get(key.encode()) test = np.frombuffer(test, dtype='uint8') test = test.reshape(H,W,C) test_gt = txn_gt.get(key.encode()) test_gt = np.frombuffer(test_gt, dtype='uint8') test_gt = test_gt.reshape(H,W,C) plt.imshow(test[:,:,::-1]) plt.figure() plt.imshow(test_gt[:,:,::-1]) plt.show() env.close() env_gt.close() ``` After generating the lmdb files, you need to creat a python file '[dataset]\_lmdb.py' under '/data' with the same name as the directory of your lmdb dataset, like 'gopro_ds_lmdb.py'. Then, copy the code of 'gopro_ds_lmdb.py' to your dataset file '[dataset]\_lmdb.py'. 
You need to modify the following arguments according to your dataset:

```
ds_name = 'gopro_ds'  # [dataset]
self.W = 960  # width of image
self.H = 540  # height of image
self.C = 3  # channel of image
```

Finally, you can train on your own dataset by specifying its name on the command line or as the default value in '/para/parameter.py'.
github_jupyter
# Linear regression with Variational Bayes ### Imports ``` import matplotlib.pyplot as plt %matplotlib notebook import numpy as np from scipy.stats import multivariate_normal ``` ### Define model and generate data ``` N = 10 # No. data points w0 = 1. # The offset in the line y = w0 + w1 * x w1 = .5 # The incline in the same line gamma = 4. # The *precision* in the observation noise st_dev = 1. / np.sqrt(gamma) # And corresponding standard deviation np.random.seed(42) x = 5 * np.random.rand(N) - 1 # The x-points are sampled uniformly on [-1, 4] y = np.random.normal(loc=w0 + w1 * x, scale=st_dev) # And the response is sampled from the Normal ``` ### Plotting of data (i.e., $x$-axis is the covariate, $y$-axis the response) ``` def data_plotter(x, y=None, true_w0=None, true_w1=None, approx_w0=None, approx_w1=None): """ Use to plot data. If y is not noe it contains responses, and (x,y) will be scatter-plotted If neither true_w0 nor true_w1 is None, we will plot the line true_w0 + x * true_w1 in red. If neither approx_w0 nor approx_w1 is None, we plot the line approx_w0 + x * approx_w1 in green. """ if y is not None: plt.plot(x, y, "bo") # Plot true line if given if true_w0 is not None and true_w1 is not None: plt.plot(x, true_w0 + true_w1 * x, "r-") # Plot approximation if given if approx_w0 is not None and approx_w1 is not None: plt.plot(x, approx_w0+ approx_w1* x, "g-", alpha=.2) ``` ### ... 
and of densities ($x$-axis correspond to offset $w_0$, $y$-axis the incline $w_1$) ``` def density_plt(x_range, y_range, true_loc=None, true_cov=None, approx_loc=None, approx_cov=None): """ Same setup as above: We can choose to plot the "true" solution (in red) and/or the approximation (in green) """ x = np.linspace(x_range[0], x_range[1], 100) y = np.linspace(y_range[0], y_range[1], 100) x_mesh, y_mesh = np.meshgrid(x, y) pos = np.empty(x_mesh.shape + (2,)) pos[:, :, 0] = x_mesh pos[:, :, 1] = y_mesh if true_loc is not None and true_cov is not None: rv = multivariate_normal(true_loc, true_cov) plt.contour(x, y, rv.pdf(pos), colors='r') if approx_loc is not None and true_cov is not None: rv = multivariate_normal(approx_loc, approx_cov) plt.contour(x, y, rv.pdf(pos), colors='g') ``` ### Check that it works: Plot the data with the true model on top, and the prior over ($w_0$, $w_1$) ``` # Plot data data_plotter(x=x, y=y, true_w0=w0, true_w1=w1) plt.show() # Plot prior of (w0, w1) density_plt(x_range=[-2, 2], y_range=[-2, 2], true_loc=[0, 0], true_cov=[[1, 0], [0, 1]]) plt.show() ``` ## Learn the parameters using the variational Bayes formulas We have **two** variables of interest here, $w_0$ and $w_1$. Both are Gaussian a posteriori, and they are parameterized by their **mean** and **precision** (inverse variance). The update rules are as follows: * `q_0_prec` := $1 + \gamma \cdot N$. * `q_0_mean` := $\gamma \cdot (\sum_i y_i - $ `q_1_mean` $ \cdot \sum_i x_i) /$ `q_0_prec`. * `q_1_prec` := $1 + \gamma \cdot \sum_i x_i^2$. * `q_1_mean` := $\gamma \cdot (\sum_i x_i y_i - $ `q_0_mean` $\cdot \sum_i x_i) /$ `q_1_prec`. ``` # Starting-point q_0_mean = 0. q_1_mean = 0. q_0_prec = 1. q_1_prec = 1. 
# Iterate for iter in range(25): q_0_prec = q_0_mean = q_1_prec = q_1_mean = print("Iter {:2d}: W0: {:6.3f} +/- {:6.3f}".format(iter, q_0_mean, 1./np.sqrt(q_0_prec)), "\tW1: {:6.3f} +/- {:6.3f}".format(q_1_mean, 1./np.sqrt(q_1_prec)) ) ``` ## Show off ### The variables `q_0_mean`, `q_0_prec`, `q_1_mean`, and `q_1_prec` must be filled for this to work ### First draw some random lines, i.e., values $(w_0, w_1)$, from the Variational Bayes posterior ``` for _ in range(100): w0_sample = np.random.normal(loc=q_0_mean, scale=1/np.sqrt(q_0_prec)) w1_sample = np.random.normal(loc=q_1_mean, scale=1 / np.sqrt(q_1_prec)) data_plotter(x=x, approx_w0=w0_sample, approx_w1=w1_sample) data_plotter(x=x, y=y) plt.show() ``` ### And finally, look at the joint pdf of $(w_0, w_1)$ from VB compared to the exact Bayesian solution ``` extended_x = np.ones((N, 2)) extended_x[:, 1] = x kernel = np.linalg.inv(np.eye(2) / gamma + np.matmul(np.transpose(extended_x), extended_x)) bayesian_mean = np.matmul(kernel, np.matmul(np.transpose(extended_x), y)) bayesian_cov = kernel / gamma density_plt(x_range=[w0 - 2. * st_dev, w0 + 2. * st_dev], y_range=[w1 - 2. * st_dev, w1 + 2. * st_dev], true_loc=bayesian_mean, true_cov=bayesian_cov, approx_loc=[q_0_mean, q_1_mean], approx_cov=[[1/q_0_prec, 0], [0, 1/q_1_prec]]) plt.show() ```
github_jupyter
``` import json import numpy as np import sys import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") from scipy import stats import pandas as pd from tqdm import tqdm from itertools import islice from nltk.corpus import stopwords from sklearn.manifold import TSNE from sklearn.decomposition import PCA from scipy.spatial.distance import cosine from google.colab import drive drive.mount('/content/drive') !jupyter nbextension enable --py widgetsnbextension def get_windows(seq,n): ''' returns a sliding window (of width n) over data from the iterable taken from: https://stackoverflow.com/questions/6822725/rolling-or-sliding-window-iterator/6822773#6822773 ''' it = iter(seq) result = tuple(islice(it, n)) if len(result) == n: yield result for elem in it: result = result[1:] + (elem,) yield result def sample_examples(docs,max_window_size,n_windows): '''generate target,context pairs and negative examples''' windows = [] for i,doc in enumerate(docs): windows.append(list(get_windows(doc, 2*np.random.randint(1, max_window_size) + 1 ))) windows = [elt for sublist in windows for elt in sublist] # flatten windows = list(np.random.choice(windows,size=n_windows)) # select a subset all_negs = list(np.random.choice(token_ints, size=n_negs*len(windows), p=neg_distr)) return windows,all_negs def compute_dot_products(pos,negs,target): prods = Wc[pos+negs,] @ Wt[target,] # (n_pos+n_negs,d) X (d,) -> (n_pos+n_negs,) return prods def compute_loss(prodpos,prodnegs): '''prodpos and prodnegs are numpy vectors containing the dot products of the context word vectors with the target word vector''' term_pos, term_negs = np.log(1 + np.exp(-prodpos)), np.log(1 + np.exp(prodnegs)) return np.sum(term_pos) + np.sum(term_negs) def compute_gradients(pos,negs,target,prodpos,prodnegs): factors_pos = 1/(np.exp(prodpos)+1) factors_negs = 1/(np.exp(-prodnegs)+1) partial_pos = np.array([factors_pos[k,] * -Wt[target,] for k in range(len(factors_pos))]) partial_negs = 
np.array([factors_negs[k,] * Wt[target,] for k in range(len(factors_negs))]) term_pos = - Wc[pos,].T @ factors_pos term_negs = Wc[negs,].T @ factors_negs partial_target = np.sum(term_pos,axis=0) + np.sum(term_negs,axis=0) return partial_pos, partial_negs,partial_target # = = = = = = = = = = = = = = = = = = = = = max_window_size = 5 # extends on both sides of the target word n_windows = int(1e6) # number of windows to sample at each epoch n_negs = 5 # number of negative examples to sample for each positive d = 64 # dimension of the embedding space n_epochs = 15 lr_0 = 0.03 decay = 1e-6 resume = True train = True with open('/content/drive/MyDrive/nlp_centrale/imdb_files/doc_ints.txt', 'r') as file: docs = file.read().splitlines() docs = [[int(eltt) for eltt in elt.split()] for elt in docs] with open('/content/drive/MyDrive/nlp_centrale/imdb_files/vocab.json', 'r') as file: vocab = json.load(file) vocab_inv = {v:k for k,v in vocab.items()} with open('/content/drive/MyDrive/nlp_centrale/imdb_files/counts.json', 'r') as file: counts = json.load(file) token_ints = range(1,len(vocab)+1) neg_distr = [counts[vocab_inv[elt]] for elt in token_ints] neg_distr = np.sqrt(neg_distr) neg_distr = neg_distr/sum(neg_distr) # normalize # ========== train model ========== if train: total_its = int(1e6)*13 if not resume : Wt = np.random.normal(size=(len(vocab)+1,d)) # + 1 is for the OOV token Wc = np.random.normal(size=(len(vocab)+1,d)) else: Wt = np.load('/content/drive/MyDrive/nlp_centrale/imdb_files/input_vecs.npy') Wc = np.load('/content/drive/MyDrive/nlp_centrale/imdb_files/output_vecs.npy') for epoch in range(n_epochs): print("Epoch : %i/%i"%(epoch+1, n_epochs)) windows,all_negs = sample_examples(docs,max_window_size,n_windows) print('training examples sampled') np.random.shuffle(windows) total_loss = 0 with tqdm(total=len(windows),unit_scale=True,postfix={'loss':0.0,'lr':lr_0},ncols=50) as pbar: #desc="Epoch : %i/%i" % (epoch+1, n_epochs) for i,w in enumerate(windows): target = 
w[int(len(w)/2)] # elt at the center pos = list(w) del pos[int(len(w)/2)] # all elts but the center one negs = all_negs[n_negs*i:n_negs*i+n_negs] prods = compute_dot_products(pos,negs,target) prodpos = prods[0:len(pos),] prodnegs = prods[len(pos):(len(pos)+len(negs)),] partials_pos,partials_negs,partial_target = compute_gradients(pos,negs,target,prodpos,prodnegs) lr = lr_0 * 1/(1+decay*total_its) total_its += 1 Wt[target,] -= lr * partial_target Wc[pos,] -= partials_pos * lr Wc[negs,] -= partials_negs * lr total_loss += compute_loss(prodpos,prodnegs) loss_printed = round(total_loss/(i+1), 4) lr_printed = round(lr, 4) pbar.set_postfix({"loss" : str(loss_printed), "lr" : str(lr_printed)}) pbar.update(1) if epoch % 1 == 0: np.save('/content/drive/MyDrive/nlp_centrale/imdb_files/input_vecs',Wt,allow_pickle=False) # pickle disabled for portability reasons np.save('/content/drive/MyDrive/nlp_centrale/imdb_files/output_vecs',Wc,allow_pickle=False) print('word vectors saved to disk') else: Wt = np.load('/content/drive/MyDrive/nlp_centrale/imdb_files/input_vecs.npy') Wc = np.load('/content/drive/MyDrive/nlp_centrale/imdb_files/output_vecs.npy') def my_cos_similarity(word1,word2): try: embed_1 = Wt[vocab[word1],].reshape(1,-1) except KeyError: embed_1 = Wt[0,].reshape(1,-1) try: embed_2 = Wt[vocab[word2],].reshape(1,-1) except KeyError: embed_2 = Wt[0,].reshape(1,-1) sim = cosine(embed_1, embed_2) return round(float(sim),4) def loadPairs(path): data = pd.read_csv(path, delimiter='\t') pairs = zip(data['word1'], data['word2'], data['SimLex999']) return pairs pairs = loadPairs("/content/drive/MyDrive/nlp_centrale/SimLex-999.txt") our_similarities,original_similarities = [],[] for a, b, original_similarity in pairs: our_similarities.append(my_cos_similarity(a, b)) original_similarities.append(original_similarity) corr = stats.spearmanr(our_similarities,original_similarities).correlation print('spearman correlation :',corr) ```
github_jupyter
![imagen](../../imagenes/ejercicios.png) # Ejercicios Python Basics II ## Ejercicio 1 * Crea dos variables numericas: un `int` y un `float` * Comprueba sus tipos * Sumalas en otra nueva * ¿De qué tipo es la nueva variable? * Elimina las dos primeras variables creadas ``` var1 = 4 var2 = 6.0 print(type(var1)) print(type(var2)) var3 = var1 + var2 print(type(var3)) del var1, var2 ``` ## Ejercicio 2 Escribe un programa para pasar de grados a radianes. Hay que usar `input`. Recuerda que la conversión se realiza mediante radianes = grados*(pi/180) ``` pi=22/7 degree = float(input("Input grados: ")) radian = degree*(pi/180) print(radian) ``` ## Ejercicio 3 Escribe un programa que calcule el area de un paralelogramo (base x altura). Tambien con `input` ``` base = float(input('Length of base: ')) height = float(input('Measurement of height: ')) area = base * height print("Area is: ", area) ``` ## Ejercicio 4 Tenemos las siguientes variables: ```Python A = 4 B = "Text" C = 4.1 ``` Comprueba: 1. Si A y B son equivalentes 2. Si A y C NO son equivalentes 3. Si A es mayor que C 4. Si C es menor o igual que A 5. Si B NO es equivalente a C ``` A = 4 B = "Text" C = 4.1 print(A == B) print(A != C) print(A > C) print(C <= A) print(B != C) ``` ## Ejercicio 5 Crea un programa donde se recojan dos inputs del usuario, y el output del programa sea si esos inputs son iguales o no ``` inp_1 = input("Input 1: ") inp_2 = input("Input 2: ") print(inp_1 == inp_2) ``` ## Ejercicio 6 Mismo programa que en 5, pero en esta ocasión tienen que ser tres inputs y dos salidas. Una de las salidas que nos indique si todos son iguales, y la otra si al menos dos inputs sí que lo son ``` inp_1 = input("Input 1: ") inp_2 = input("Input 2: ") inp_3 = input("Input 3: ") todos = inp_1 == inp_2 and inp_1 == inp_3 and inp_2 == inp_3 print("Todos son iguales:", todos) dos = inp_1 == inp_2 or inp_1 == inp_3 or inp_2 == inp_3 print("Todos son iguales:", dos) ``` ## Ejercicio 7 Crea un programa que recoja dos inputs. 
Tiene que comprobar si su suma es igual, superior o inferior a 10 ``` inp_1 = input("Input 1: ") inp_2 = input("Input 2: ") suma = float(inp_1) + float(inp_2) print("Es mayor que 10:", suma > 10) print("Es menor que 10:", suma < 10) print("Es igual que 10:", suma == 10) ``` ## Ejercicio 8 Razona sin ejecutar código el output que obtendremos de las siguientes sentencias 1. True and True and False 2. not ((True or False) and (True or False)) 3. (False or False or False or True) and False 4. not False and True and not True 1. False 2. False 3. False 4. False ## Ejercicio 9 Para este ejercicio vamos a poner en práctica [las funciones built in](https://docs.python.org/3/library/functions.html). 1. Calcula el máximo de la lista: [4, 6, 8, -1] 2. Suma todos los elementos de la lista anterior 3. Redondea este float a 3 dígitos decimales: 63.451256965 4. Valor absoluto de: -74 ``` print(max([4, 6, 8, -1])) print(sum([4, 6, 8, -1])) print(round(63.451256965, 3)) print(abs(-74)) ``` ## Ejercicio 10 Para el siguiente string se pide imprimir por pantalla los siguientes casos "A quien madruga, dios le ayuda" 1. Pasarlo todo a mayusculas 2. Pasarlo todo a minusculas 3. Solo las iniciales de las palabras 4. Crea una lista dividiendolo por sus espacios 5. Sustituye las comas `,` por puntos y comas `;` 6. Elimina las `a` minusculas ``` ej_10 = "A quien madruga, dios le ayuda" print(ej_10.upper()) print(ej_10.lower()) print(ej_10.title()) print(ej_10.split(' ')) print(ej_10.replace(',', ';')) print(ej_10.replace('a', '')) ``` ## Ejercicio 11 1. Crea una lista con 3 elementos numéricos 2. Añade un cuarto elemento 3. Calcula la suma de todos 4. Elimina el segundo elemento de la lista 5. Añade otro elemento en la posicion 3 de la lista 6. Crea otra lista con 4 elementos y concatenala a la que ya tenías. ``` list_11 = [1, 2, 3] list_11.append(4) print(sum(list_11)) list_11.remove(1) print(list_11) list_11.insert(2, 5) print(list_11) list_11_2 = [6, 7, 8, 9] print(list_11 + list_11_2) ```
github_jupyter
# Chapter 5 - Image Classification > Deep Learning For Coders with fastai & Pytorch - Image Classification, In this notebook I followed both Jeremy Howard's Lesson on fast.ai and Weigh and Biases reading group videos. Lots of notes added, some cell's order changed some are added to make the topic more understandable for me. (Check Manual calculation `log_softmax` + `nll_loss`). Click `open in colab` button at the right side to view as notebook. - toc: true - badges: true - comments: true - categories: [fastbook] - image: images/cyberman.png *** ![](images/chapter-05/cyberman.png) > I'm a Doctor Who fan and this is my cyberman coffee cup, as I remember got it from Manchester Science Museum. *** ``` #!pip install -Uqq fastbook import fastbook fastbook.setup_book() %config Completer.use_jedi = False from fastbook import * ``` [[chapter_pet_breeds]] ## PLAYING WITH THE DATASET ``` from fastai.vision.all import * path = untar_data(URLs.PETS) ``` > Note: __With `untar` we download the data. This data originally come from Oxford University [Visual Geomety Group](https://www.robots.ox.ac.uk/~vgg/data/) and our dataset is [here:](https://www.robots.ox.ac.uk/~vgg/data/pets/)__ ``` path ``` > Note: __This is the local download path for my computer.__ ``` Path.BASE_PATH = path ``` > Tip: **This is a trick to get the relative path, check above and below** ``` path ``` Now the `path` is looks different. ``` path.ls() ``` > Note: __`#2` is number of item in the list. `annotations` represents target variables of this datasets but we do not use them at this time instead we create our own labels.__ ``` (path/"images").ls() fname = (path/"images").ls()[0] fname ``` > Note: __The first image in the `path` list.__ ``` re.findall(r'(.+)_\d+.jpg$', fname.name) ``` > Note: __Since we don't use the annonations in the Dataset we need to find a way to get breeds form the filename. 
This is regex `findall` method, Check `geeksforgeeks.org` tutorial [here](https://www.geeksforgeeks.org/python-regex-re-search-vs-re-findall/)__ ``` pets = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(seed=42), get_y=using_attr(RegexLabeller(r'(.+)_\d+.jpg$'), 'name'), item_tfms=Resize(460), batch_tfms=aug_transforms(size=224, min_scale=0.75)) dls = pets.dataloaders(path/"images") ``` > Note: __now find all names with RegexLabeller. The `item_tmsf` and `batch_transfdrms` may look a bit meaningless. Check below to find out why.__ *** ### PRESIZING As a summary FastAi gives a chance to augment our images in a smarter way (`presizing`) such that provide much more detail and information for the training. First, we presize images with `item_tfms` then push them to GPU and use augmentation. [check the original document for the whole idea](https://colab.research.google.com/github/fastai/fastbook/blob/master/05_pet_breeds.ipynb) ``` #id interpolations #caption A comparison of fastai's data augmentation strategy (left) and the traditional approach (right). dblock1 = DataBlock(blocks=(ImageBlock(), CategoryBlock()), get_y=parent_label, item_tfms=Resize(460)) # Place an image in the 'images/grizzly.jpg' subfolder where this notebook is located before running this dls1 = dblock1.dataloaders([(Path.cwd()/'images'/'chapter-05'/'grizzly.jpg')]*100, bs=8) dls1.train.get_idxs = lambda: Inf.ones x,y = dls1.valid.one_batch() _,axs = subplots(1, 2) x1 = TensorImage(x.clone()) x1 = x1.affine_coord(sz=224) x1 = x1.rotate(draw=30, p=1.) x1 = x1.zoom(draw=1.2, p=1.) x1 = x1.warp(draw_x=-0.2, draw_y=0.2, p=1.) 
tfms = setup_aug_tfms([Rotate(draw=30, p=1, size=224), Zoom(draw=1.2, p=1., size=224), Warp(draw_x=-0.2, draw_y=0.2, p=1., size=224)]) x = Pipeline(tfms)(x) #x.affine_coord(coord_tfm=coord_tfm, sz=size, mode=mode, pad_mode=pad_mode) TensorImage(x[0]).show(ctx=axs[0]) TensorImage(x1[0]).show(ctx=axs[1]); dls.show_batch(nrows=3, ncols=3) pets1 = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(seed=42), get_y=using_attr(RegexLabeller(r'(.+)_\d+.jpg$'), 'name')) pets1.summary(path/"images") ``` > Note: __It is alway good to get a quick summary. `pets1.summary(path/"images")` Check the summary above, it has lots of details. It is natural to get an error in this example because we are trying the put diffent size images into the same `DataBlock`.__ *** ## BASELINE MODEL For every project, just start with a Baseline. Baseline is a good point to think about the project/domain/problem at the same time, then start improve and make experiments about architecture, hyperparameters etc. ``` learn = cnn_learner(dls, resnet34, metrics=error_rate) learn.fine_tune(2) ``` > Note: __A basic run is helpful as baseline for the beginning.__ ### Defaults for the baseline ``` learn.loss_func learn.lr ``` > Tip: __Very easy to see default arguments for the learner. Above loss function `loss_func` and learning rate `lr`.__ ### One Batch Run ``` first(dls.train) ``` > Note: above and below is same ``` x,y = dls.one_batch() ``` *** ### Understanding Labels ``` dls.vocab dls.vocab[0] ``` > Tip: __`vocab` gives as all labels as text.__ *** ### What's inside the tensors? ``` y ``` > Note: __Targets as coded.__ ``` x ``` > Note: __Our stacked image tensor.__ *** ### Predictions of the baseline model. ``` preds,_ = learn.get_preds(dl=[(x,y)]) preds[0] ``` > Note: __result for first item that adds up to one. 
There are 37 outputs for 37 image categories and the results are the probabilities of each category.__ ``` _ ``` > Note: __Category codes__ ``` len(preds[0]),preds[0].sum() ``` Predictions for the 37 categories add up to one. *** ## FUNCTION FOR CLASSIFYING MORE THAN TWO CATEGORIES For classifying more than two categories, we need to employ a new function. It is not totally different from sigmoid; in fact it starts with a sigmoid function. ``` plot_function(torch.sigmoid, min=-4,max=4) ``` > Note: __This is how `torch.sigmoid` squishes values between 0 and 1.__ ``` torch.random.manual_seed(42); acts = torch.randn((6,2))*2 acts ``` > Note: __These are random numbers that represent the binary outputs of a hypothetical network. The first column represents 3's and the second represents 7's, with a standard deviation of 2. It generally shows how confident the model is about the predictions.__ ``` acts.sigmoid() ``` > Note: __If we apply the sigmoid, the result becomes like this (above). Obviously they don't add up to one. These are relative confidences over the inputs. For example the first row says: it's a three. But what is the probability? It is not clear.__ ``` (acts[:,0]-acts[:,1]).sigmoid() ``` > Note: __If we take the difference between these relative confidences the results become like this (above): now we can say that for the first item, the model is 0.6025 (60.25%) confident.__ This part is a bit different in the lesson video, so check the video. [1:35:20](https://youtu.be/p50s63nPq9I?t=5721) ``` sm_acts = torch.softmax(acts, dim=1) sm_acts ``` > Note: __`torch.softmax` does that in one step. 
Now results for each item adds up to one and identical.__ *** ### __Log Likelihood__ ``` targ = tensor([0,1,0,1,1,0]) ``` this is our softmax activations: ``` sm_acts idx = range(6) sm_acts[idx, targ] ``` > Note: __Nice trick for getting confidence level for each item.__ lets see everything in a table: ``` from IPython.display import HTML df = pd.DataFrame(sm_acts, columns=["3","7"]) df['targ'] = targ df['idx'] = idx df['loss'] = sm_acts[range(6), targ] t = df.style.hide_index() #To have html code compatible with our script html = t._repr_html_().split('</style>')[1] html = re.sub(r'<table id="([^"]+)"\s*>', r'<table >', html) display(HTML(html)) ``` > Warning: __I think the last label is wrong here. It must be the confidence instead.__ ``` -sm_acts[idx, targ] ``` > Warning: __There is a caveat here. These are neg of our confidence level, not loss.__ Pytorch way of doing the same here: ``` F.nll_loss(sm_acts, targ, reduction='none') ``` > Note: __Anyway, numbers are still not right, that will be addresses in the `Taking the Log` section below. The reason is F.nll_loss (negative log likelihood loss) needs arguments such that log is already applied to make the calculation right.(loss)__ *** ### Taking the Log > Note: Directly from the book: > Important: __Confusing Name, Beware: The nll in `nll_loss` stands for "negative log likelihood," but it doesn't actually take the log at all! It assumes you have _already_ taken the log. PyTorch has a function called `log_softmax` that combines `log` and `softmax` in a fast and accurate way. `nll_loss` is designed to be used after `log_softmax`.__ When we first take the softmax, and then the log likelihood of that, that combination is called *cross-entropy loss*. 
In PyTorch, this is available as `nn.CrossEntropyLoss` (which, in practice, actually does `log_softmax` and then `nll_loss`): PyTorch's cross entropy: ``` loss_func = nn.CrossEntropyLoss() loss_func(acts, targ) ``` or: ``` F.cross_entropy(acts, targ) ``` > Note: this is the mean of all losses, and this is all results without taking the mean: ``` nn.CrossEntropyLoss(reduction='none')(acts, targ) ``` > Note: Results above are the cross entropy loss for each image in the list (of course our current numbers are fake numbers) *** ### Manual calculation `log_softmax` + `nll_loss` First log_softmax: ``` log_sm_acts = torch.log_softmax(acts, dim=1) log_sm_acts ``` Then negative log likelihood: ``` F.nll_loss(log_sm_acts, targ, reduction='none') ``` >Note: __Results are identical__ *** ## REVISITING THE BASELINE MODEL (Model Interpretation) ``` #width 600 interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix(figsize=(12,12), dpi=60) interp.most_confused(min_val=5) ``` This is our baseline; we can start improving from this point. *** ## IMPROVING THE MODEL ### Fine Tune Fine-tune the model with default arguments: ``` learn = cnn_learner(dls, resnet34, metrics=error_rate) learn.fine_tune(1, base_lr=0.1) ``` > Note: __This is where we overshot. Our loss just increases over the second epoch — is there a better way to find a learning rate?__ *** ### Learning Rate Finder ``` learn = cnn_learner(dls, resnet34, metrics=error_rate) suggested_lr= learn.lr_find() ``` > Warning: There is a discrepancy between the lesson and reading group notebooks. In the book we get two values from the function but in the reading group, only one. I think there was an update to this function that is not reflected in the book. 
``` suggested_lr print(f"suggested: {suggested_lr.valley:.2e}") learn = cnn_learner(dls, resnet34, metrics=error_rate) learn.fine_tune(2, base_lr=8.32e-04) ``` At this time it decreases steadily #### __What's under the hood of `fine_tune`__ When we create a model from a pretrained network fastai automatically freezes all of the pretrained layers for us. When we call the `fine_tune` method fastai does two things: - Trains the randomly added layers for one epoch, with all other layers frozen - Unfreezes all of the layers, and trains them all for the number of epochs requested __Lets do it manually__ ``` learn = cnn_learner(dls, resnet34, metrics=error_rate) learn.fit_one_cycle(3, 8.32e-04) learn.unfreeze() ``` Run the `lr_find` again, because having more layers to train, and weights that have already been trained for three epochs, means our previously found learning rate isn't appropriate any more: ``` learn.lr_find() ``` Train again with the new lr. ``` learn.fit_one_cycle(6, lr_max=0.0001) ``` So far so good but there is more way to go *** ### Discriminative Learning Rates Basically we use variable learning rate for the model. Bigger rate for the later layers and smaller for early layers. ``` learn = cnn_learner(dls, resnet34, metrics=error_rate) learn.fit_one_cycle(3, 8.32e-04)# first lr learn.unfreeze() learn.fit_one_cycle(12, lr_max=slice(0.00005,0.0005))#second lr with a range ``` It is better most of the times.(sometimes I don't get good results, need to arrange the `slice` values more carefully) ``` learn.recorder.plot_loss() ``` > Note: Directly from the book: As you can see, the training loss keeps getting better and better. But notice that eventually the validation loss improvement slows, and sometimes even gets worse! This is the point at which the model is starting to over fit. In particular, the model is becoming overconfident of its predictions. But this does not mean that it is getting less accurate, necessarily. 
Take a look at the table of training results per epoch, and you will often see that the accuracy continues improving, even as the validation loss gets worse. In the end what matters is your accuracy, or more generally your chosen metrics, not the loss. The loss is just the function we've given the computer to help us to optimize. > Important: I need to think about it how loss increase and accuracy stil becoming better. ### Deeper Architectures In general, a bigger model has the ability to better capture the real underlying relationships in your data, and also to capture and memorize the specific details of your individual images. However, using a deeper model is going to require more GPU RAM, so you may need to lower the size of your batches to avoid an *out-of-memory error*. This happens when you try to fit too much inside your GPU and looks like: ``` Cuda runtime error: out of memory ``` You may have to restart your notebook when this happens. The way to solve it is to use a smaller batch size, which means passing smaller groups of images at any given time through your model. You can pass the batch size you want to the call creating your `DataLoaders` with `bs=`. The other downside of deeper architectures is that they take quite a bit longer to train. One technique that can speed things up a lot is *mixed-precision training*. This refers to using less-precise numbers (*half-precision floating point*, also called *fp16*) where possible during training. As we are writing these words in early 2020, nearly all current NVIDIA GPUs support a special feature called *tensor cores* that can dramatically speed up neural network training, by 2-3x. They also require a lot less GPU memory. To enable this feature in fastai, just add `to_fp16()` after your `Learner` creation (you also need to import the module). You can't really know ahead of time what the best architecture for your particular problem is—you need to try training some. 
So let's try a ResNet-50 now with mixed precision: ``` from fastai.callback.fp16 import * learn = cnn_learner(dls, resnet50, metrics=error_rate).to_fp16() learn.fine_tune(12, freeze_epochs=3) learn.recorder.plot_loss() ``` As above, training time is not changed much.
github_jupyter
# CNN WITH TF-SLIM #### ALL CODES ARE FROM [HWALSUKLEE](https://github.com/hwalsuklee/tensorflow-mnist-cnn) ``` import gzip import os from scipy import ndimage from six.moves import urllib import numpy as np import tensorflow as tf import tensorflow.contrib.slim as slim print ("PACKAGES LOADED") ``` # CNN MODEL WITH TF-SLIM ``` def CNN(inputs, _is_training=True): x = tf.reshape(inputs, [-1, 28, 28, 1]) batch_norm_params = {'is_training': _is_training, 'decay': 0.9, 'updates_collections': None} net = slim.conv2d(x, 32, [5, 5], padding='SAME' , activation_fn = tf.nn.relu , weights_initializer = tf.truncated_normal_initializer(stddev=0.01) , normalizer_fn = slim.batch_norm , normalizer_params = batch_norm_params , scope='conv1') net = slim.max_pool2d(net, [2, 2], scope='pool1') net = slim.conv2d(net, 64, [5, 5], scope='conv2') net = slim.max_pool2d(net, [2, 2], scope='pool2') net = slim.flatten(net, scope='flatten3') net = slim.fully_connected(net, 1024 , activation_fn = tf.nn.relu , weights_initializer = tf.truncated_normal_initializer(stddev=0.01) , normalizer_fn = slim.batch_norm , normalizer_params = batch_norm_params , scope='fc4') net = slim.dropout(net, keep_prob=0.7, is_training=_is_training, scope='dropout4') out = slim.fully_connected(net, 10, activation_fn=None, normalizer_fn=None, scope='fco') return out ``` # HANDLING MNIST ``` # DATA URL SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' DATA_DIRECTORY = "data" # PARAMETERS FOR MNIST IMAGE_SIZE = 28 NUM_CHANNELS = 1 PIXEL_DEPTH = 255 NUM_LABELS = 10 VALIDATION_SIZE = 5000 # Size of the validation set. 
# DOWNLOAD MNIST DATA, IF NECESSARY def maybe_download(filename): if not tf.gfile.Exists(DATA_DIRECTORY): tf.gfile.MakeDirs(DATA_DIRECTORY) filepath = os.path.join(DATA_DIRECTORY, filename) if not tf.gfile.Exists(filepath): filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath) with tf.gfile.GFile(filepath) as f: size = f.size() print('Successfully downloaded', filename, size, 'bytes.') return filepath # EXTRACT IMAGES def extract_data(filename, num_images): with gzip.open(filename) as bytestream: bytestream.read(16) buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS) data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH # -0.5~0.5 data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS) data = np.reshape(data, [num_images, -1]) return data # [image index, y, x, channels] # EXTRACT LABELS def extract_labels(filename, num_images): with gzip.open(filename) as bytestream: bytestream.read(8) buf = bytestream.read(1 * num_images) labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) num_labels_data = len(labels) one_hot_encoding = np.zeros((num_labels_data,NUM_LABELS)) one_hot_encoding[np.arange(num_labels_data),labels] = 1 one_hot_encoding = np.reshape(one_hot_encoding, [-1, NUM_LABELS]) return one_hot_encoding # AUGMENT TRAINING DATA def expend_training_data(images, labels): expanded_images = [] expanded_labels = [] j = 0 # counter for x, y in zip(images, labels): j = j+1 # APPEND ORIGINAL DATA expanded_images.append(x) expanded_labels.append(y) # ASSUME MEDIAN COLOR TO BE BACKGROUND COLOR bg_value = np.median(x) # this is regarded as background's value image = np.reshape(x, (-1, 28)) for i in range(4): # ROTATE IMAGE angle = np.random.randint(-15,15,1) new_img = ndimage.rotate(image,angle,reshape=False, cval=bg_value) # SHIFT IAMGE shift = np.random.randint(-2, 2, 2) new_img_ = ndimage.shift(new_img,shift, cval=bg_value) # ADD TO THE LIST 
expanded_images.append(np.reshape(new_img_, 784)) expanded_labels.append(y) expanded_train_total_data = np.concatenate((expanded_images, expanded_labels), axis=1) np.random.shuffle(expanded_train_total_data) return expanded_train_total_data # PREPARE MNIST DATA def prepare_MNIST_data(use_data_augmentation=True): # Get the data. train_data_filename = maybe_download('train-images-idx3-ubyte.gz') train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz') test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz') test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz') train_data = extract_data(train_data_filename, 60000) train_labels = extract_labels(train_labels_filename, 60000) test_data = extract_data(test_data_filename, 10000) test_labels = extract_labels(test_labels_filename, 10000) validation_data = train_data[:VALIDATION_SIZE, :] validation_labels = train_labels[:VALIDATION_SIZE,:] train_data = train_data[VALIDATION_SIZE:, :] train_labels = train_labels[VALIDATION_SIZE:,:] if use_data_augmentation: train_total_data = expend_training_data(train_data, train_labels) else: train_total_data = np.concatenate((train_data, train_labels), axis=1) train_size = train_total_data.shape[0] return train_total_data, train_size, validation_data, validation_labels, test_data, test_labels ``` # CONFIGURATION ``` MODEL_DIRECTORY = "model/model.ckpt" LOGS_DIRECTORY = "logs/train" training_epochs = 10 TRAIN_BATCH_SIZE = 50 display_step = 500 validation_step = 500 TEST_BATCH_SIZE = 5000 ``` # PREPARE MNIST DATA ``` batch_size = TRAIN_BATCH_SIZE # BATCH SIZE (50) num_labels = NUM_LABELS # NUMBER OF LABELS (10) train_total_data, train_size, validation_data, validation_labels \ , test_data, test_labels = prepare_MNIST_data(True) # PRINT FUNCTION def print_np(x, str): print (" TYPE AND SHAPE OF [%18s ] ARE %s and %14s" % (str, type(x), x.shape,)) print_np(train_total_data, 'train_total_data') print_np(validation_data, 'validation_data') 
print_np(validation_labels, 'validation_labels') print_np(test_data, 'test_data') print_np(test_labels, 'test_labels') ``` # DEFINE MODEL ``` # PLACEHOLDERS x = tf.placeholder(tf.float32, [None, 784]) y_ = tf.placeholder(tf.float32, [None, 10]) #answer is_training = tf.placeholder(tf.bool, name='MODE') # CONVOLUTIONAL NEURAL NETWORK MODEL y = CNN(x, is_training) # DEFINE LOSS with tf.name_scope("LOSS"): loss = slim.losses.softmax_cross_entropy(y, y_) # DEFINE ACCURACY with tf.name_scope("ACC"): correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # DEFINE OPTIMIZER with tf.name_scope("ADAM"): batch = tf.Variable(0) learning_rate = tf.train.exponential_decay( 1e-4, # LEARNING_RATE batch * batch_size, # GLOBAL_STEP train_size, # DECAY_STEP 0.95, # DECAY_RATE staircase=True) # LR = LEARNING_RATE*DECAY_RATE^(GLOBAL_STEP/DECAY_STEP) train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss,global_step=batch) # 'batch' IS AUTOMATICALLY UPDATED AS WE CALL 'train_step' # SUMMARIES saver = tf.train.Saver() tf.summary.scalar('learning_rate', learning_rate) tf.summary.scalar('loss', loss) tf.summary.scalar('acc', accuracy) merged_summary_op = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(LOGS_DIRECTORY, graph=tf.get_default_graph()) print ("MODEL DEFINED.") ``` # OPEN SESSION ``` sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer(), feed_dict={is_training: True}) ``` # OPTIMIZE ### FOR TESTING PURPOSES, SKIP THIS SECTION ``` # MAXIMUM ACCURACY max_acc = 0. 
# LOOP for epoch in range(training_epochs): # training_epochs: 10 # RANDOM SHUFFLE np.random.shuffle(train_total_data) train_data_ = train_total_data[:, :-num_labels] train_labels_ = train_total_data[:, -num_labels:] # ITERATIONS total_batch = int(train_size / batch_size) for iteration in range(total_batch): # GET CURRENT MINI-BATCH offset = (iteration * batch_size) % (train_size) batch_xs = train_data_[offset:(offset + batch_size), :] batch_ys = train_labels_[offset:(offset + batch_size), :] # OPTIMIZE _, train_accuracy, summary = sess.run([train_step, accuracy, merged_summary_op] , feed_dict={x: batch_xs, y_: batch_ys, is_training: True}) # WRITE LOG summary_writer.add_summary(summary, epoch*total_batch + iteration) # DISPLAY if iteration % display_step == 0: print("Epoch: [%3d/%3d] Batch: [%04d/%04d] Training Acc: %.5f" % (epoch + 1, training_epochs, iteration, total_batch, train_accuracy)) # GET ACCURACY FOR THE VALIDATION DATA if iteration % validation_step == 0: validation_accuracy = sess.run(accuracy, feed_dict={x: validation_data, y_: validation_labels, is_training: False}) print("Epoch: [%3d/%3d] Batch: [%04d/%04d] Validation Acc: %.5f" % (epoch + 1, training_epochs, iteration, total_batch, validation_accuracy)) # SAVE THE MODEL WITH HIGEST VALIDATION ACCURACY if validation_accuracy > max_acc: max_acc = validation_accuracy save_path = saver.save(sess, MODEL_DIRECTORY) print(" MODEL UPDATED TO [%s] VALIDATION ACC IS %.5f" % (save_path, validation_accuracy)) print("OPTIMIZATION FINISHED") ``` # COMPUTE TEST ACCURACY ``` # RESTORE SAVED NETWORK saver.restore(sess, MODEL_DIRECTORY) # COMPUTE ACCURACY FOR TEST DATA test_size = test_labels.shape[0] total_batch = int(test_size / batch_size) acc_buffer = [] for i in range(total_batch): offset = (i * batch_size) % (test_size) batch_xs = test_data[offset:(offset + batch_size), :] batch_ys = test_labels[offset:(offset + batch_size), :] y_final = sess.run(y, feed_dict={x: batch_xs, y_: batch_ys, is_training: False}) 
correct_prediction = np.equal(np.argmax(y_final, 1), np.argmax(batch_ys, 1)) acc_buffer.append(np.sum(correct_prediction.astype(float)) / batch_size) print("TEST ACCURACY IS: %.4f" % np.mean(acc_buffer)) ```
github_jupyter
### Tokenize and Lemmatize inputs Source: lecture notebooks + https://gist.github.com/4OH4/f727af7dfc0e6bb0f26d2ea41d89ee55 ``` import pandas as pd import json from sklearn.feature_extraction.text import CountVectorizer import nltk from nltk import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords import pickle import spacy import numpy as np train = pd.read_csv('data/train.csv') valid = pd.read_csv('data/valid.csv') test = pd.read_csv('data/valid.csv') train = train.dropna(subset=['title', 'abstract']).reset_index() valid = valid.dropna(subset=['title', 'abstract']).reset_index() test = test.dropna(subset=['title', 'abstract']).reset_index() with open('map_labels.json', 'r') as f: map_labels = json.load(f) def preprocess(dat=object): dat = dat.fillna('') dat['input'] = dat['title']+' '+dat['abstract'] dat['key'] = 'PMID:'+dat['pmid'].astype(str) # generate vector of Boolean values, for labels label_vec_idx = list() for i in range(len(dat)): labels = dat.loc[i, 'label'].split(';') label_vec_idx.append(list(map(map_labels.get, labels))) label_vec = [[0]*9 for i in range(len(label_vec_idx))] for i in range(len(label_vec_idx)): for j in label_vec_idx[i]: label_vec[i][j] = 1 dat['label_vec'] = label_vec # initialize dictionary dat_dict = {dat['key'][i]: {'input': dat['input'][i], 'label': dat['label'][i], 'label_vec': dat['label_vec'][i]} for i in range(len(dat))} # load_model = spacy.load('en_core_web_md', disable = ['parser','ner']) load_model = spacy.load('en_core_web_md') stop_words = set(stopwords.words('english')) # Lemmatize, NER, add to dictionary for i in dat_dict: doc = load_model(dat_dict[i]['input']) dat_dict[i]['lemmas'] = [token.lemma_ for token in doc if token.lemma_ not in stop_words and len(token.lemma_) > 1 ] # dat_dict[i]['ent_iob'] = [token.ent_iob_ for token in doc] # dat_dict[i]['ent_type'] = [token.ent_type_ for token in doc] # Generate word embeddings for each lemma nlp = spacy.load('en_core_web_md') for i 
in dat_dict: embeddings = {j: nlp.vocab[j].vector for j in dat_dict[i]['lemmas']} sentenceMatrix = np.array([embeddings[i] for i in embeddings]) dat_dict[i]['embeddings'] = sentenceMatrix return dat_dict %%time train_dict = preprocess(train) valid_dict = preprocess(valid) test_dict = preprocess(test) pickle.dump(train_dict, open('data/train.pkl', 'wb')) pickle.dump(valid_dict, open('data/valid.pkl', 'wb')) pickle.dump(test_dict, open('data/test.pkl', 'wb')) # pickle.dump(valid_dict, open('data/valid_ner.pkl', 'wb')) ```
github_jupyter
# Processing Oscilloscope Point Scan The samples from our oscilloscope connected to our microphone (since our sound card doesn't support going up to 28kHz) will be useful for visualizing the acoustics of our system. We will have to know beforehand which frequency we want to detect. This depends on the oscilloscope settings, especially the resolution of the FFT when calculating that on the scope. ``` import os import pickle import glob import skimage import numpy as np from collections import Counter from matplotlib import pyplot as plt %matplotlib inline ``` ## Data Loading ### Format This function loads in the data. The data should be in the format ```bash data_dir/<x_coord>_<y_coord>_<z_coord>.pkl ``` To make the design more modular we pass in the folder name to the function. Each pickle file should be a numpy array with dimensions $N \times D$, where $N$ is the number of samples collected at the point and $D$ is the dimension of the FFT data. Therefore, you can also provide a `sample_start` and `sample_end` to filter out the unneeded data. ### Example For example, a lot of my initial scans had the following parameters. 1. I set the oscilloscope to do a fourier transform with a resolution of $5$ Hz for each frequency bin. 2. The tranducer is outputting $28$ kHz from a sine wave. 3. When measuring with the `OscilloscopeMicrophone` class, I started sampling the FFT data from $0$ Hz to $50000$ Hz. Therefore, for `sample_start` and `sample_end` I should put $5595$ and $5605$ respectively, so I can look closely at the fourier transform from between $27975$ Hz and $28025$ Hz. I recommend giving a range of a few Hz even if you are measuring a pure tone because sometimes the FFT may smear. 
``` def get_data(data_dir, sample_start=0, sample_end=200, verbose=False): fnames = list(sorted(glob.glob(os.path.join(data_dir, "*.pkl")))) print('Found %s records' % len(fnames)) # Load into a list of tuples of xmin, xmax, y, data data = [] XMIN, XMAX = None, None for fname in fnames: with open(fname, 'rb') as f: fft_data = pickle.load(f) # Isolate the frequency. In our case, 28kHz is usually around sample 8000 to 10000 try: amplitudes = fft_data[:, sample_start:sample_end] except IndexError: if verbose: print("indexerror for file %s" % fname) print('Diagonstics on the length of each sample:') lengths = [len(arr) for arr in fft_data] correct_length = min(lengths) amplitudes = np.array([t[:correct_length] for t in fft_data]) name = os.path.basename(fname).replace('.pkl', '').replace('continuous_', '') coords = [float(coord) for coord in name.split('_')] xmin, xmax, y = coords XMIN = xmin XMAX = xmax data.append((xmin, xmax, y, amplitudes)) # Sort by y coordinate (xmin and xmax are expected to be the same for all) data = list(sorted(data)) if not data: raise RuntimeError('No Data Found') return data ``` We also know that the transducer is being fed in a pure sine wave from the signal generator, so we expect a sharp spike at one specific frequency in the fourier transform. Therefore, we will get the sample have a function that returns the indices and values of the maximum values in each FFT sample. We also have a function that returns just the maximum amplitude found in your range. ``` def get_maxes_and_freqs(samples): """Takes in a N by S array, where N is the number of samples and S is the dimensions of the FFT sample taken. We will get a list of maxes and the frequency they correspond to as two np arrays, so we can plot as a scatterplot""" maxes = samples.max(axis=1) idxs = samples.argmax(axis=1) return idxs, maxes def get_max_amp(samples): """Takes in a N by S array, where N is the number of samples and S is the dimensions of the FFT sample taken. 
Gets the maximum amplitude of each sample, and then takes the average of the maximum amplitude over all the samples.""" return samples.max(axis=1).mean() ``` Now we can actually get the data from a scan that I did previously. ``` data = get_data('../data/1551236003') ``` ### Data Output Format Using Python, we've loaded the data as a list of tuples. We use this format: ```bash [(x_coord, y_coord, z_coord, ARR)] ``` where `ARR` is the numpy array of `NUM_SAMPLES x REDUCED_FFT_DIM`, where `REDUCED_FFT_DIM` was how much you reduced the FFT bins by supplying `sample_start` and `sample_end`. ``` data ``` Now we can plot the data by processing layers for each XY plane. If we looked at the numbers from data, we see that this scan is just scanning a box from (0, 0) to (100, 100) with a resolution of 2. ``` # For each frequency bin, we can make an image of what values we have. # otherwise, make that value -1 or something to show that we don't have info WIDTH, HEIGHT = 51, 51 image = np.zeros((WIDTH, HEIGHT)) for index, d in enumerate(data): idx, idy = int(index % WIDTH), int(index / WIDTH) _, max_amps = get_maxes_and_freqs(d[-1]) image[idx, idy] = max_amps.mean() plt.figure(figsize=(12, 12)) plt.title("A really old scan of just a transducer at the top at 28 kHz") plt.xlabel("X Coordinate (mm)") plt.ylabel("Y Coordinate (mm)") plt.imshow(image) plt.show() ``` ### Increasing the Resolution That image was pretty underwhelming, especially noting that the resolution was so poor, I don't even know what's going on. Here's another example of a completely different scan. This time let's actually look at what our samples look like. We'll take everything we have recorded and plot a pickle file. ``` data = get_data('../data/1544186931', sample_start=0, sample_end=100000) print("Shape of each pickle file.") print(data[0][-1].shape) plt.plot(data[0][-1].T) plt.show() ``` As we can see, the peak we want is probably the ones between bins 3000 and 4000. 
I think the peak near 2000 is probably artifacting. Let's reload our data. ``` data = get_data('../data/1544186931', sample_start=0, sample_end=100000) # For each frequency bin, we can make an image of what values we have. # otherwise, make that value -1 or something to show that we don't have info WIDTH, HEIGHT = 76, 76 image = np.zeros((WIDTH, HEIGHT)) for index, d in enumerate(data): idx, idy = int(index % WIDTH), int(index / WIDTH) image[idx, idy] = get_max_amp(d[-1]) plt.figure(figsize=(12, 12)) plt.title("A really old scan of just a transducer at the top at 28 kHz") plt.xlabel("X Coordinate (mm)") plt.ylabel("Y Coordinate (mm)") plt.imshow(image) plt.show() ``` We see that in this case, we get much better resolution of the transducer and can actually see waves. We can verify that the wavelengths agree with 28 kHz through an air medium.
github_jupyter
## Concept The two main structures to work with DQ0 quarantine via the DQ0 SDK are * Project - the current model environment, a workspace and directory the user can define models in. Project also provides access to trained models. * Experiment - the DQ0 runtime to execute training runs in the remote quarantine. Start by importing the core classes ``` %cd ../ # import dq0-sdk api from dq0.sdk.cli import Project, Experiment ``` ## Create a project Projects act as the working environment for model development. Each project has a model directory with a .meta file containing the model uuid, attached data sources etc. Creating a project with `Project.create(name='model_1')` is equivalent to calling the DQ0 Cli command `dq0-cli project create model_1` # DQ0 SDK Demo ## Prerequistes * Installed DQ0 SDK. Install with `pip install dq0-sdk` * Installed DQ0 CLI. * Proxy running and registered from the DQ0 CLI with `dq0-cli proxy add ...` * Valid session of DQ0. Log in with `dq0 user login` * Running instance of DQ0 CLI server: `dq0 server start` ``` # create a project with name 'model_1'. Automatically creates the 'model_1' directory and changes to this directory. project = Project(name='model_1') ``` ## Load a project Alternatively, you can load an existing project by first cd'ing into this directory and then call Project.load() This will read in the .meta file of this directory ``` %cd ../dq0-cli/census # Alternative: load a project from the current model directory project = Project.load() project.project_uuid ``` ## Create Experiment To execute DQ0 training commands inside the quarantine you define experiments for your projects. You can create as many experiments as you like for one project. ``` # Create experiment for project experiment = Experiment(project=project, name='experiment_1') ``` ## Get and attach data source For new projects you need to attach a data source. Existing (loaded) projects usually already have data sources attached. 
``` # first get some info about available data sources sources = project.get_available_data_sources() # get info about the first source info = project.get_data_info(sources[0]) info ``` Get the dataset description: ``` # print data description info['data_description'] ``` Also, inspect the data column types including allowed values for feature generation: ``` # print information about column types and values info['data_type'] ``` Now, attach the dataset to our project ``` # attach the first dataset project.attach_data_source(sources[0]) ``` ## Define the model Working with DQ0 is basically about defining two functions: * setup_data() - called right before model training to prepare attached data sources * setup_model() - actual model definition code The easiest way to define those functions is to write them in the notebook (inline) and pass them to the project before calling deploy. Alternatively, the user can write the complete user_model.py to the project's directory. ### Define fuctions inline First variant with functions passed to the project instance. Note that you need to define imports inline inside the functions as only those code blocks are replaced in the source files. 
``` # define functions def setup_data(): # load input data if self.data_source is None: logger.error('No data source found') return data = self.data_source.read() # read and preprocess the data dataset_df = self.preprocess() from sklearn.model_selection import train_test_split X_train_df, X_test_df, y_train_ts, y_test_ts =\ train_test_split(dataset_df.iloc[:, :-1], dataset_df.iloc[:, -1], test_size=0.33, random_state=42) self.input_dim = X_train_df.shape[1] # set data member variables self.X_train = X_train_df self.X_test = X_test_df self.y_train = y_train_ts self.y_test = y_test_ts def setup_model(): import tensorflow.compat.v1 as tf self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.1) self.loss = tf.keras.losses.SparseCategoricalCrossentropy() # As an alternative, define the loss function with a string self.epochs = 10 self.batch_size = 250 # self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate) self.optimizer = 'Adam' self.num_microbatches = 250 self.metrics = ['accuracy'] self.loss = tf.keras.losses.SparseCategoricalCrossentropy() self.model = tf.keras.Sequential([ tf.keras.layers.Input(self.input_dim), tf.keras.layers.Dense(10, activation='tanh'), tf.keras.layers.Dense(10, activation='tanh'), tf.keras.layers.Dense(2, activation='softmax')]) def preprocess(): # columns column_names_list = [ 'lastname', 'firstname', 'age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income' ] # columns types list drawn from data source types information above. 
columns_types_list = [ { 'name': 'age', 'type': 'int' }, { 'name': 'workclass', 'type': 'string', 'values': [ 'Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay', 'Never-worked', 'Unknown' ] }, { 'name': 'fnlwgt', 'type': 'int' }, { 'name': 'education', 'type': 'string', 'values': [ 'Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th', '7th-8th', '12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool' ] }, { 'name': 'education-num', 'type': 'int' }, { 'name': 'marital-status', 'type': 'string', 'values': [ 'Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed', 'Married-spouse-absent', 'Married-AF-spouse' ] }, { 'name': 'occupation', 'type': 'string', 'values': [ 'Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty', 'Handlers-cleaners', 'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving', 'Priv-house-serv', 'Protective-serv', 'Armed-Forces', 'Unknown' ] }, { 'name': 'relationship', 'type': 'string', 'values': [ 'Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried' ] }, { 'name': 'race', 'type': 'string', 'values': [ 'White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black' ] }, { 'name': 'sex', 'type': 'string', 'values': [ 'Female', 'Male' ] }, { 'name': 'capital-gain', 'type': 'int' }, { 'name': 'capital-loss', 'type': 'int' }, { 'name': 'hours-per-week', 'type': 'int' }, { 'name': 'native-country', 'type': 'string', 'values': [ 'United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany', 'Outlying-US(Guam-USVI-etc)', 'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran', 'Honduras', 'Philippines', 'Italy', 'Poland', 'Jamaica', 'Vietnam', 'Mexico', 'Portugal', 'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador', 'Taiwan', 'Haiti', 'Columbia', 'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 
'Yugoslavia', 'El-Salvador', 'Trinadad&Tobago', 'Peru', 'Hong', 'Holand-Netherlands', 'Unknown' ] } ] from dq0.sdk.data.preprocessing import preprocessing import sklearn.preprocessing import pandas as pd if 'dataset' in globals(): # local testing mode dataset = globals()['dataset'] else: # get the input dataset if self.data_source is None: logger.error('No data source found') return # read the data via the attached input data source dataset = self.data_source.read( names=column_names_list, sep=',', skiprows=1, index_col=None, skipinitialspace=True, na_values={ 'capital-gain': 99999, 'capital-loss': 99999, 'hours-per-week': 99, 'workclass': '?', 'native-country': '?', 'occupation': '?'} ) # drop unused columns dataset.drop(['lastname', 'firstname'], axis=1, inplace=True) column_names_list.remove('lastname') column_names_list.remove('firstname') # define target feature target_feature = 'income' # get categorical features categorical_features_list = [ col['name'] for col in columns_types_list if col['type'] == 'string'] # get categorical features quantitative_features_list = [ col['name'] for col in columns_types_list if col['type'] == 'int' or col['type'] == 'float'] # get arguments approach_for_missing_feature = 'imputation' imputation_method_for_cat_feats = 'unknown' imputation_method_for_quant_feats = 'median' features_to_drop_list = None # handle missing data dataset = preprocessing.handle_missing_data( dataset, mode=approach_for_missing_feature, imputation_method_for_cat_feats=imputation_method_for_cat_feats, imputation_method_for_quant_feats=imputation_method_for_quant_feats, # noqa: E501 categorical_features_list=categorical_features_list, quantitative_features_list=quantitative_features_list) if features_to_drop_list is not None: dataset.drop(features_to_drop_list, axis=1, inplace=True) # get dummy columns dataset = pd.get_dummies(dataset, columns=categorical_features_list, dummy_na=False) # unzip categorical features with dummies 
categorical_features_list_with_dummies = [] for col in columns_types_list: if col['type'] == 'string': for value in col['values']: categorical_features_list_with_dummies.append('{}_{}'.format(col['name'], value)) # add missing columns missing_columns = set(categorical_features_list_with_dummies) - set(dataset.columns) for col in missing_columns: dataset[col] = 0 # and sort the columns dataset = dataset.reindex(sorted(dataset.columns), axis=1) # Scale values to the range from 0 to 1 to be precessed by the neural network dataset[quantitative_features_list] = sklearn.preprocessing.minmax_scale(dataset[quantitative_features_list]) # label target y_ts = dataset[target_feature] le = sklearn.preprocessing.LabelEncoder() y_bin_nb = le.fit_transform(y_ts) y_bin = pd.Series(index=y_ts.index, data=y_bin_nb) dataset.drop([target_feature], axis=1, inplace=True) dataset[target_feature] = y_bin return dataset # set model code in project project.set_model_code(setup_data=setup_data, setup_model=setup_model, preprocess=preprocess, parent_class_name='NeuralNetworkClassification') ``` ### Define functions as source code Second variant, writing the complete model. Template can be retrieved by `!cat models/user_model.py` which is created by Project create. ``` %%writefile models/user_model.py import logging from dq0.sdk.models.tf import NeuralNetworkClassification logger = logging.getLogger() class UserModel(NeuralNetworkClassification): """Derived from dq0.sdk.models.tf.NeuralNetwork class Model classes provide a setup method for data and model definitions. """ def __init__(self): super().__init__() def setup_data(self): """Setup data function. See code above...""" pass def preprocess(self): """Preprocess the data. See code above...""" pass def setup_model(self): """Setup model function See code above...""" pass ``` ## Train the model After testing the model locally directly in this notebook, it's time to train it inside the DQ0 quarantine. 
This is done by calling experiment.train() which in turn calls the Cli commands `dq0-cli project deploy` and `dq0-cli model train` ``` run = experiment.run() ``` train is executed asynchronously. You can wait for the run to complete or get the state with get_state: (TBD: in the future there could by a jupyter extension that shows the run progress in a widget.) ``` # wait for completion run.wait_for_completion(verbose=True) ``` When the run has completed you can retrieve the results: ``` # get training results print(run.get_results()) ``` After train dq0 will run the model checker to evaluate if the trained model is safe and allowed for prediction. Get the state of the checker run together with the other state information with the get_state() function: ``` # get the state whenever you like print(run.get_state()) # get the model model = run.get_model() model.__dict__ # register the model model.register() ``` ## Predict Finally, it's time to use the trained model to predict something ``` import numpy as np import pandas as pd # check DQ0 privacy clearing if model.predict_allowed: # create predict set records = [ { 'lastname': 'some-lastname', 'firstname': 'some-firstname', 'age': 45, 'workclass':'Private', 'fnlwgt': 544091, 'education': 'HS-grad', 'education-num': 9, 'marital-status': 'Married-AF-spouse', 'occupation': 'Exec-managerial', 'relationship': 'Wife', 'race': 'White', 'sex': 'Female', 'capital-gain': 0, 'capital-loss': 0, 'hours-per-week': 25, 'native-country': 'United-States', 'income': '<=50K' }, { 'lastname': 'some-lastname', 'firstname': 'some-firstname', 'age': 29, 'workclass': 'Federal-gov', 'fnlwgt': 162298, 'education': 'Masters', 'education-num': 14, 'marital-status': 'Married-civ-spouse', 'occupation': 'Exec-managerial', 'relationship': 'Husband', 'race': 'White', 'sex': 'Male', 'capital-gain': 34084, 'capital-loss': 0, 'hours-per-week': 70, 'native-country': 'United-States', 'income': '<=50K' } ] dataset = pd.DataFrame.from_records(records) # drop 
target (included above only because of compatibility with preprocess function) dataset.drop(['income'], axis=1, inplace=True) # load or get numpy predict data # predict_data = np.load('X_demo_predict.npy') predict_data = dataset.to_numpy() # call predict #run = model.predict(predict_data) run = model.predict(predict_data) # wait for completion run.wait_for_completion(verbose=True) # get predict results print(run.get_results()) ```
github_jupyter
## 读取数据 ``` import pandas as pd train_labeled_cn = pd.read_csv('../data/raw/cn_train.csv', encoding='utf-8') dev_cn = pd.read_csv('../data/raw/cn_dev.csv', encoding='utf-8') train_labeled_cn.shape train_labeled_cn.columns train_labeled_cn.head(5) ``` ## 统计重复的句子 ``` dup_sentence = train_labeled_cn[train_labeled_cn['Sentence'].duplicated(keep=False)] len(dup_sentence) dup_sentence dup_sentence['Label'].value_counts() ``` ## Dialogue长度 ``` dialogue_info = {} greater = 0 for index, line in train_labeled_cn.iterrows(): dialogue_id = int(line[1]) dialogue_info[dialogue_id] = dialogue_info.get(dialogue_id, 0) + 1 for key, value in dialogue_info.items(): if value > 64: greater += 1 print(len(dialogue_info)) print(greater) ``` ## 标签分布 ``` train_labeled_cn['Label'].value_counts() # 1:2.4769 %matplotlib inline train_labeled_cn['Label'].value_counts(normalize=True).plot(kind='bar'); ``` ## 人物数量及标签分布 ``` speaker_labels_mean = dict(train_labeled_cn.groupby("Speaker")['Label'].mean()) # sorted(speaker_labels_mean.items(), key=lambda item: item[1], reverse=True) speaker_labels_sum = dict(train_labeled_cn.groupby("Speaker")['Label'].sum()) # print(speaker_labels_sum) # sorted(speaker_labels_sum.items(), key=lambda item: item[1], reverse=True) from collections import Counter train_cn_counter = Counter(train_labeled_cn['Speaker']) dev_cn_counter = Counter(dev_cn['Speaker']) # print(train_cn_counter) # print("**********************************************") print(dev_cn_counter) speaker_analysis = {} for (key, value) in speaker_labels_sum.items(): speaker_analysis[key] = [round(train_cn_counter[key], 3), value, round(speaker_labels_mean[key], 3)] # print(speaker_analysis) sorted(speaker_analysis.items(), key=lambda item: item[1][2], reverse=True) train_cn_set = set(train_labeled_cn['Speaker']) dev_cn_set = set(dev_cn['Speaker']) print("训练集人物个数:", len(train_cn_set)) print("开发集人物个数:", len(dev_cn_set)) print(dev_cn_set - train_cn_set) ``` 
把训练集中的笑点数为0的speaker全丢到一类:other,开发集中有在训练集中未出现过的speaker就不管了 ## 文本长度 ``` train_labeled_cn['Sentence'].str.len().describe() dev_cn['Sentence'].str.len().describe() ``` ## 数据划分 ``` from sklearn.model_selection import train_test_split dataset = train_labeled[['Dialogue_id', 'Speaker', 'Sentence', 'Label']] train, valid = train_test_split(dataset, test_size=0.2, stratify=train_labeled['Label'], random_state=1000) train.to_csv('data/preprocess/cn_train.tsv', index=False, header=False, sep='\t',encoding='utf-8') valid.to_csv('data/preprocess/cn_dev.tsv', index=False, header=False, sep='\t', encoding='utf-8') dataset.to_csv('data/preprocess/cn_total.tsv', index=False, header=False, sep='\t', encoding='utf-8') ``` # 英文 ``` import pandas as pd train_labeled_en = pd.read_csv('../data/raw/en_train.csv', encoding='utf-8') dev_en = pd.read_csv('../data/raw/en_dev.csv', encoding='utf-8') train_labeled_en.shape train_labeled_en.columns train_labeled_en.head(5) train_labeled_en['Sentence'].head(10) pd.set_option('max_columns',1000) pd.set_option('max_row',1000) pd.set_option('display.float_format', lambda x: '%.5f' % x) dup_sentence_en = train_labeled_en[train_labeled_en['Sentence'].duplicated(keep=False)] len(dup_sentence_en) dup_sentence_en dup_sentence_en['Label'].value_counts() ``` ## Dialogue长度 ``` dialogue_info = {} greater = 0 for index, line in train_labeled_en.iterrows(): dialogue_id = int(line[1]) dialogue_info[dialogue_id] = dialogue_info.get(dialogue_id, 0) + 1 for key, value in dialogue_info.items(): if value > 16: greater += 1 print(len(dialogue_info)) print(greater) ``` ## 标签分布 ``` train_labeled_en['Label'].value_counts() # 1:3.214 %matplotlib inline train_labeled_en['Label'].value_counts(normalize=True).plot(kind='bar'); ``` ## 人物数量及标签分布 ``` speaker_en_labels_mean = dict(train_labeled_en.groupby("Speaker")['Label'].mean()) # sorted(speaker_labels_mean.items(), key=lambda item: item[1], reverse=True) speaker_en_labels_sum = 
dict(train_labeled_en.groupby("Speaker")['Label'].sum()) # print(speaker_labels_sum) # sorted(speaker_labels_sum.items(), key=lambda item: item[1], reverse=True) from collections import Counter train_en_counter = Counter(train_labeled_en['Speaker']) dev_en_counter = Counter(dev_en['Speaker']) # print(train_cn_counter) # print("**********************************************") print(dev_en_counter) speaker_analysis_en = {} for (key, value) in speaker_en_labels_sum.items(): speaker_analysis_en[key] = [round(train_en_counter[key], 3), value, round(speaker_en_labels_mean[key], 3)] # print(speaker_analysis) sorted(speaker_analysis_en.items(), key=lambda item: item[1][2], reverse=True) train_en_set = set(train_labeled_en['Speaker']) dev_en_set = set(dev_en['Speaker']) print("训练集人物个数:", len(train_en_set)) print("开发集人物个数:", len(dev_en_set)) print(dev_en_set - train_en_set) ``` ## 文本长度 ``` train_labeled_en['Sentence'].apply(lambda x : len(str(x).split(" "))).describe() dev_en['Sentence'].apply(lambda x : len(str(x).split(" "))).describe() ``` ## 数据划分 ``` from sklearn.model_selection import train_test_split dataset = train_labeled_en[['Dialogue_id', 'Speaker', 'Sentence', 'Label']] train, valid = train_test_split(dataset, test_size=0.2, stratify=train_labeled['Label'], random_state=1000) train.to_csv('data/preprocess/en_train.tsv', index=False, header=False, sep='\t',encoding='utf-8') valid.to_csv('data/preprocess/en_dev.tsv', index=False, header=False, sep='\t', encoding='utf-8') dataset.to_csv('data/preprocess/en_total.tsv', index=False, header=False, sep='\t', encoding='utf-8') import pandas as pd train_labeled_en = pd.read_csv('data/raw/cn_dev.csv', encoding='utf-8') train_labeled_en.shape train_labeled_en.columns train_labeled_en.head(5) train_labeled_en['Sentence'].apply(lambda x : len(str(x).split(" "))).describe() dataset = train_labeled_en[['Dialogue_id', 'Speaker', 'Sentence']] dataset.to_csv('data/preprocess/cn_test.tsv', index=False, header=False, sep='\t', 
encoding='utf-8') ```
github_jupyter
# Labeled Stream Creator ## Environment ``` import nuclio import os base_path = os.path.abspath('../') base_stream_path = f'/users/' + os.environ['V3IO_USERNAME']+ f'{base_path[5:]}' data_path = os.path.join(base_path, 'data') src_path = os.path.join(base_path, 'src') streaming_path = os.path.join(base_stream_path, 'streaming') fs_streaming_path = os.path.join(base_path, 'streaming') os.environ['base_path'] = base_path os.environ['data_path'] = data_path os.environ['src_path'] = src_path os.environ['streaming_path'] = streaming_path os.environ['fs_streaming_path'] = os.path.join(base_path, 'streaming') os.environ['METRICS_TABLE'] = fs_streaming_path + '/metrics' os.environ['PREDICTIONS_TABLE'] = fs_streaming_path+'/predictions' os.environ['OUTPUT_STREAM'] = streaming_path+'/labeled_stream' os.environ['prediction_col'] = 'predictions' os.environ['label_col'] = 'is_error' os.environ['output_stream_shards'] = '1' os.environ['BATCHES_TO_GENERATE'] = '20' ``` ## Function ``` # nuclio: start-code import os import pandas as pd import json import v3io import v3io.dataplane import socket def split_path(mntpath=''): if mntpath[0] == '/': mntpath = mntpath[1:] paths = mntpath.split('/') container = paths[0] subpath = '' if len(paths) > 1: subpath = mntpath[len(container):] return container, subpath def create_stream(context, path, shards=1): # create a stream w shards container, stream_path = split_path(path) context.logger.info(f'Creating stream in Container: {container} & Path {stream_path}') response = context.v3io_client.stream.create(container=container, stream_path=stream_path, shard_count=shards, raise_for_status=v3io.dataplane.RaiseForStatus.never) response.raise_for_status([409, 204]) def push_to_stream(context, stream_path, data): def restructure_stream_event(context, event): instances = [dict()] for key in data.keys(): if key not in ['when', 'class', 'model', 'worker', 'hostname', context.prediction_col]: instances[0].update({key: event.pop(key)}) event['request'] 
= {'instances': instances} event['resp'] = [int(event.pop(context.prediction_col))] return event records = json.loads(data.to_json(orient='records')) records = [{'data': json.dumps(restructure_stream_event(context, record))} for record in records] context.logger.info(f'Logging {len(records)} records, Record example: {records[0]}') container, stream_path = split_path(stream_path) # batch step = 10 for idx in range(0, len(records), step): response = context.v3io_client.put_records(container=container, path=stream_path, records=records[idx:idx+step]) def get_data_parquet(context, table, files_to_select=1): mpath = [os.path.join(table, file) for file in os.listdir(table) if file.endswith(('parquet', 'pq'))] files_by_updated = sorted(mpath, key=os.path.getmtime, reverse=False) context.logger.debug_with('Input', input_files=files_by_updated[:files_to_select]) dfs = pd.concat([pd.read_parquet(file) for file in files_by_updated[:files_to_select]]) return dfs def init_context(context): # How many batches to create? 
(-1 will run forever) batches_to_generate = int(os.getenv('BATCHES_TO_GENERATE', 20)) setattr(context, 'batches_to_generate', batches_to_generate) setattr(context, 'batches_generated', 0) # Set env vars setattr(context, 'metrics_table', os.environ['METRICS_TABLE']) setattr(context, 'predictions_table', os.environ['PREDICTIONS_TABLE']) setattr(context, 'output_stream', os.environ['OUTPUT_STREAM']) setattr(context, 'timestamp_col', os.getenv('timestamp_col', 'when')) setattr(context, 'orig_timestamp_col', os.getenv('orig_timestamp_col', 'timestamp')) v3io_client = v3io.dataplane.Client(logger_verbosity='DEBUG', transport_verbosity='DEBUG') # v3io_client.stream.create(container='users', stream_path='/orz/mlrun-demos/demos/network-operations/streaming/labeled_stream', shard_count=1) setattr(context, 'v3io_client', v3io_client) create_stream(context, context.output_stream) setattr(context, 'label_col', os.environ['label_col']) setattr(context, 'prediction_col', os.environ['prediction_col']) def handler(context, event): # Limit the number of generated batches to save cluster resources # for people forgetting the demo running if (context.batches_to_generate == -1) or (context.batches_generated <= context.batches_to_generate): metrics = get_data_parquet(context, context.metrics_table, 2).loc[:, context.label_col].astype('int') metrics.index.names = list([name if name != context.orig_timestamp_col else context.timestamp_col for name in metrics.index.names]) predictions = get_data_parquet(context, context.predictions_table, 2) context.logger.debug(f'Labeling metrics ({metrics.shape}) and predictions ({predictions.shape})') context.logger.debug_with('Indexes', metrics_index=metrics.index.names, predictions_index=predictions.index.names) print('metrics') print(metrics.head()) print(type(metrics)) metrics = pd.DataFrame(metrics) print('change') print(type(metrics)) print(metrics.head()) print(metrics.index.names) full_df = pd.merge(left=predictions, right=metrics, 
left_on=metrics.index.names, how='left', right_index=True) full_df = full_df.reset_index() context.logger.info(f'Fully labeled batch size is {full_df.shape}') context.logger.info(f'Indexes: {list(full_df.index.names)}') context.logger.info(f'Columns: {full_df.columns}') context.logger.info_with('sample', full_df=full_df.head(1)) push_to_stream(context, context.output_stream, full_df) # Update batches count context.batches_generated += 1 # nuclio: end-code ``` ## Test ``` init_context(context) event = nuclio.Event(body='') out = handler(context, event) out ``` ## Stream test ``` from v3io.dataplane import Client from pprint import pprint v3io_client = Client() # v3io_client.delete_stream(container='users', path='/admin/demos/network-operations/streaming/labeled_stream') def print_stream(path, shard='0', seek_type='EARLIEST', last=100): # seek the shard to the first record in it container, stream_path = split_path(path) shard_path = os.path.join(stream_path, shard) response = v3io_client.seek_shard(container=container, path=shard_path, seek_type=seek_type) response.raise_for_status() # get records, starting from the location we got from seek response = v3io_client.get_records(container=container, path=shard_path, location=response.output.location) response.raise_for_status() models = ['pagehinkley', 'eddm', 'ddm'] result_record = response.output.records records = [json.loads(record.data) for record in result_record[:last]] pprint(records) print_stream(context.output_stream, seek_type='EARLIEST', last=2) ``` ## Deploy ``` from mlrun import code_to_function, mount_v3io fn = code_to_function('labeled-stream-creator', kind='nuclio', project='network-operations', image='mlrun/ml-models') fn.spec.build.commands = ['pip install v3io'] fn.apply(mount_v3io()) fn.add_trigger('cron', nuclio.triggers.CronTrigger(interval='1m')) fn.set_envs({'METRICS_TABLE' : fs_streaming_path + '/metrics', 'PREDICTIONS_TABLE' : fs_streaming_path+'/predictions', 'OUTPUT_STREAM' : 
streaming_path+'/labeled_stream', 'prediction_col' : 'predictions', 'label_col' : 'is_error', 'output_stream_shards' : '1', 'BATCHES_TO_GENERATE' : '20'}) fn.save() fn.export('../src/labeled_stream_creator.yaml') fn.deploy(project='network-operations') fn.invoke('') ```
github_jupyter
*This notebook assumes to be launched inside the source root and it uses relative path to obtain other resources.* Test MapD->PyGDF->matrix ``` PWD = !pwd import sys import os.path from pprint import pprint ``` Add import path to MapD Thrift binding and Arrow schema ``` thirdparty_path = os.path.join(PWD[0], '..', 'thirdparty') sys.path.append(thirdparty_path) thirdparty_path ``` If `pygdf` cannot be imported, uncomment code below: ``` # pygdf_path = os.path.join(PWD[0], '..') # sys.path.append(pygdf_path) import pygdf from thrift.protocol import TBinaryProtocol from thrift.protocol import TJSONProtocol from thrift.transport import TSocket from thrift.transport import THttpClient from thrift.transport import TTransport from mapd import MapD from mapd import ttypes ``` MapD connection ``` def get_client(host_or_uri, port, http): if http: transport = THttpClient.THttpClient(host_or_uri) protocol = TJSONProtocol.TJSONProtocol(transport) else: socket = TSocket.TSocket(host_or_uri, port) transport = TTransport.TBufferedTransport(socket) protocol = TBinaryProtocol.TBinaryProtocol(transport) client = MapD.Client(protocol) transport.open() return client db_name = 'mapd' user_name = 'mapd' passwd = 'HyperInteractive' hostname = 'localhost' portno = 9091 client = get_client(hostname, portno, False) session = client.connect(user_name, passwd, db_name) print('Connection complete') ``` The Query ``` columns = """ 
INCEARN,RECTYPE,IPUMS_YEAR,DATANUM,SERIAL,NUMPREC,SUBSAMP,HHWT,HHTYPE,REPWT,ADJUST,CPI99,REGION,STATEICP,STATEFIP,COUNTY,COUNTYFIPS,METRO,METAREA,METAREAD,MET2013,MET2013ERR,CITY,CITYERR,CITYPOP,PUMA,PUMARES2MIG,STRATA,PUMASUPR,CONSPUMA,CPUMA0010,APPAL,APPALD,HOMELAND,MET2003,CNTRY,GQ,GQTYPE,GQTYPED,FARM,OWNERSHP,OWNERSHPD,MORTGAGE,MORTGAG2,COMMUSE,FARMPROD,ACREHOUS,MORTAMT1,MORTAMT2,TAXINCL,INSINCL,PROPINSR,PROPTX99,OWNCOST,RENT,RENTGRS,RENTMEAL,CONDOFEE,MOBLHOME,MOBLHOM2,MOBLOAN,SECRES,SECRESMO,SECRESRE,COSTELEC,COSTGAS,COSTWATR,COSTFUEL,PUBHOUS,RENTSUB,HEATSUB,LUNCHSUB,FOODSTMP,FDSTPAMT,VALUEH,LINGISOL,VACANCY,KITCHEN,KITCHENORIG,FRIDGE,FRIDGEORIG,SINK,STOVE,ROOMS,ROOMSORIG,PLUMBING,HOTWATER,SHOWER,TOILET,BUILTYR,BUILTYR2,UNITSSTR,BEDROOMS,BEDROOMSORIG,PHONE,PHONEORIG,CILAPTOP,CIHAND,CIOTHCOMP,CINETHH,CIMODEM,CISAT,CIDSL,CIFIBER,CIBRDBND,CIDIAL,CIOTHSVC,FUELHEAT,VEHICLES,SSMC,NFAMS,NSUBFAM,NCOUPLES,NMOTHERS,NFATHERS,MULTGEN,MULTGEND,CBNSUBFAM,REPWT1,REPWT2,REPWT3,REPWT4,REPWT5,REPWT6,REPWT7,REPWT8,REPWT9,REPWT10,REPWT11,REPWT12,REPWT13,REPWT14,REPWT15,REPWT16,REPWT17,REPWT18,REPWT19,REPWT20,REPWT21,REPWT22,REPWT23,REPWT24,REPWT25,REPWT26,REPWT27,REPWT28,REPWT29,REPWT30,REPWT31,REPWT32,REPWT33,REPWT34,REPWT35,REPWT36,REPWT37,REPWT38,REPWT39,REPWT40,REPWT41,REPWT42,REPWT43,REPWT44,REPWT45,REPWT46,REPWT47,REPWT48,REPWT49,REPWT50,REPWT51,REPWT52,REPWT53,REPWT54,REPWT55,REPWT56,REPWT57,REPWT58,REPWT59,REPWT60,REPWT61,REPWT62,REPWT63,REPWT64,REPWT65,REPWT66,REPWT67,REPWT68,REPWT69,REPWT70,REPWT71,REPWT72,REPWT73,REPWT74,REPWT75,REPWT76,REPWT77,REPWT78,REPWT79,REPWT80,RESPMODE,PERNUM,PERWT,SLWT,REPWTP,FAMSIZE,NCHILD,NCHLT5,FAMUNIT,ELDCH,YNGCH,NSIBS,MOMLOC,STEPMOM,MOMRULE,POPLOC,STEPPOP,POPRULE,SPLOC,SPRULE,SUBFAM,SFTYPE,SFRELATE,CBSUBFAM,CBSFTYPE,CBSFRELATE,RELATE,RELATED,SEX,AGE,AGEORIG,BIRTHQTR,MARST,BIRTHYR,MARRNO,MARRINYR,YRMARR,DIVINYR,WIDINYR,FERTYR,RACE,RACED,HISPAN,HISPAND,BPL,BPLD,ANCESTR1,ANCESTR1D,ANCESTR2,ANCESTR2D,CITIZEN,YRNATUR,YRIMMIG,YRSUSA1,YRSUSA2,SPO
KEN_LANGUAGE,LANGUAGED,SPEAKENG,TRIBE,TRIBED,RACESING,RACESINGD,RACAMIND,RACASIAN,RACBLK,RACPACIS,RACWHT,RACOTHER,RACNUM,SCHOOL,EDUC,EDUCD,GRADEATT,GRADEATTD,SCHLTYPE,DEGFIELD,DEGFIELDD,DEGFIELD2,DEGFIELD2D,EMPSTAT,EMPSTATD,LABFORCE,OCC,OCC1950,OCC1990,OCC2010,IND,IND1950,IND1990,CLASSWKR,CLASSWKRD,OCCSOC,INDNAICS,WKSWORK1,WKSWORK2,UHRSWORK,WRKLSTWK,ABSENT,LOOKING,AVAILBLE,WRKRECAL,WORKEDYR,POVERTY,OCCSCORE,SEI,HWSEI,PRESGL,PRENT,ERSCOR50,ERSCOR90,EDSCOR50,EDSCOR90,NPBOSS50,NPBOSS90,MIGRATE1,MIGRATE1D,MIGPLAC1,MIGMET1,MIGTYPE1,MIGCITY1,MIGPUMS1,MIGPUMA1,MOVEDIN,MOVEDINORIG,DISABWRK,VETDISAB,DIFFREM,DIFFPHYS,DIFFMOB,DIFFCARE,DIFFSENS,DIFFEYE,DIFFHEAR,VETSTAT,VETSTATD,VET01LTR,VET95X00,VET90X01,VET90X95,VET75X90,VET80X90,VET75X80,VETVIETN,VET55X64,VETKOREA,VET47X50,VETWWII,VETOTHER,VETYRS,PWSTATE2,PWMETRO,PWCITY,PWTYPE,PWPUMA00,PWPUMAS,TRANWORK,CARPOOL,RIDERS,TRANTIME,DEPARTS,ARRIVES,GCHOUSE,GCMONTHS,GCRESPON,PROBAI,PROBAPI,PROBBLK,PROBOTH,PROBWHT,REPWTP1,REPWTP2,REPWTP3,REPWTP4,REPWTP5,REPWTP6,REPWTP7,REPWTP8,REPWTP9,REPWTP10,REPWTP11,REPWTP12,REPWTP13,REPWTP14,REPWTP15,REPWTP16,REPWTP17,REPWTP18,REPWTP19,REPWTP20,REPWTP21,REPWTP22,REPWTP23,REPWTP24,REPWTP25,REPWTP26,REPWTP27,REPWTP28,REPWTP29,REPWTP30,REPWTP31,REPWTP32,REPWTP33,REPWTP34,REPWTP35,REPWTP36,REPWTP37,REPWTP38,REPWTP39,REPWTP40,REPWTP41,REPWTP42,REPWTP43,REPWTP44,REPWTP45,REPWTP46,REPWTP47,REPWTP48,REPWTP49,REPWTP50,REPWTP51,REPWTP52,REPWTP53,REPWTP54,REPWTP55,REPWTP56,REPWTP57,REPWTP58,REPWTP59,REPWTP60,REPWTP61,REPWTP62,REPWTP63,REPWTP64,REPWTP65,REPWTP66,REPWTP67,REPWTP68,REPWTP69,REPWTP70,REPWTP71,REPWTP72,REPWTP73,REPWTP74,REPWTP75,REPWTP76,REPWTP77,REPWTP78,REPWTP79,REPWTP80 """.strip() print(len(columns.split(','))) query = "SELECT {} FROM ipums_easy WHERE INCEARN > 100;".format(columns) print('Query is : ' + query) # always use True for is columnar results = client.sql_execute_cudf(session, query, device_id=0, first_n=-1) results ``` Use Numba to access the IPC memory handle Note: this requires 
numba 0.32.0 + PR #2023 ```bash git clone https://github.com/numba/numba cd numba git fetch origin pull/2023/merge:pr/2023 git checkout pr/2023 python setup.py install ``` ``` from numba import cuda from numba.cuda.cudadrv import drvapi ipc_handle = drvapi.cu_ipc_mem_handle(*results.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=results.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) dptr ``` `dptr` is GPU memory containing the query result Convert `dptr` into a GPU device ndarray (numpy array like object on GPU) ``` import numpy as np dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) ``` Use PyGDF to read the arrow metadata from the query ``` from pygdf.gpuarrow import GpuArrowReader reader = GpuArrowReader(darr) ``` Wrap result in a Python CUDA DataFrame ``` from pygdf.dataframe import DataFrame df = DataFrame(reader.to_dict().items()) ``` Preprocess the data ``` num_cols = set() cat_cols = set() response_set = set(['INCEARN']) feature_names = set(df.columns) - response_set ``` Determine categorical and numeric columns. Compute unique values from categorical columns. ``` uniques = {} for k in feature_names: try: uniquevals = df[k].unique_k(k=1000) uniques[k] = uniquevals except ValueError: # more than 1000 unique values num_cols.add(k) else: # within 1000 unique values nunique = len(uniquevals) if nunique < 2: del df[k] # drop constant column elif 1 < nunique < 1000: cat_cols.add(k) # as cat column else: num_cols.add(k) # as num column ``` Fix numeric columns. Fill NA, Scale to [0, 1]. 
Drop near constant ``` for k in (num_cols - response_set): df[k] = df[k].fillna(df[k].mean()) assert df[k].null_count == 0 std = df[k].std() # drop near constant columns if not np.isfinite(std) or std < 1e-4: del df[k] print('drop near constant', k) else: df[k] = df[k].scale() ``` Expand categorical columns ``` for k in cat_cols: cats = uniques[k][1:] # drop first df = df.one_hot_encoding(k, prefix=k, cats=cats) del df[k] ``` Ensure INCEARN is float64 ``` df['INCEARN'] = df['INCEARN'].astype(np.float64) # Print dtypes {df[k].dtype for k in df.columns} ``` Turn the dataframe into a matrix ``` gpu_matrix = df.as_gpu_matrix() ``` The ctypes pointer to the gpu matrix ``` ctypes_ptr = gpu_matrix.device_ctypes_pointer print('address value as integer', hex(ctypes_ptr.value)) ``` Get numpy array for the matrix ``` host_matrix = gpu_matrix.copy_to_host() host_matrix.shape ``` Cleanup the IPC handle ``` ipch.close() ```
github_jupyter
# Geometries Geometry entities are child elements of `<visual>` or `<collision>` elements. ``` # Import the element creator from pcg_gazebo.parsers.sdf import create_sdf_element ``` ## Basic entities Demonstration of the basic SDF elements that can be generated with and without the optional parameters. ### Geometries #### Box ``` # Default box box = create_sdf_element('box') print('Default box') print(box) print('Default box - as dict') print(box.to_dict()) print('Default box - as SDF') print(box.to_xml_as_str()) # Changing the size box.size = [2, 3, 4] print('Custom box') print(box) # Exporting # box.export_xml('/tmp/box.sdf') ``` #### Cylinder ``` # Default cylinder cylinder = create_sdf_element('cylinder') print('Default cylinder') print(cylinder) print('Default cylinder - as dict') print(cylinder.to_dict()) print('Default cylinder - as SDF') print(cylinder.to_xml_as_str()) # Changing the parameters cylinder.radius = 2 cylinder.length = 3 print('Custom cylinder') print(cylinder) # Exporting # cylinder.export_xml('/tmp/cylinder.sdf') ``` #### Sphere ``` # Default sphere sphere = create_sdf_element('sphere') print('Default sphere') print(sphere) print('Default sphere - as dict') print(sphere.to_dict()) print('Default sphere - as SDF') print(sphere.to_xml_as_str()) # Changing the parameters sphere.radius = 2 print('Custom sphere') print(sphere) # Exporting # sphere.export_xml('/tmp/sphere.sdf') ``` #### Plane ``` # Default plane plane = create_sdf_element('plane') print('Default plane') print(plane) print('Default plane - as dict') print(plane.to_dict()) print('Default plane - as SDF') print(plane.to_xml_as_str()) # Changing the parameters # Length of each side of the plane plane.size = [10, 10] # Normal direction of the plane plane.normal = [1, 0, 0] print('Custom plane') print(plane) # Exporting # plane.export_xml('/tmp/plane.sdf') ``` #### Image A grayscale image can be used to extrude a set of boxes ``` # Default image image = create_sdf_element('image') 
print('Default image') print(image) print('Default image - as dict') print(image.to_dict()) print('Default image - as SDF') print(image.to_xml_as_str()) # Height of the extruded boxes image.height = 10 # The amount of error in the model image.granularity = 1 # Grayscale threshold image.threshold = 100 # Scaling factor applied to the image image.scale = [2] # URI of the grayscale image image.uri = 'filename' print('Custom image') print(image) # Exporting # image.export_xml('/tmp/image.sdf') ``` #### Mesh ``` mesh = create_sdf_element('mesh') print('Default mesh - with default parameters') print(mesh) print('Default mesh - as dict') print(mesh.to_dict()) print('Default mesh - as SDF') print(mesh.to_xml_as_str()) print('Mesh with optional parameters') mesh.reset(with_optional_elements=True) print(mesh) # Name of the submesh under the parent mesh mesh.submesh.name = 'submesh.stl' # Set to true to center the vertices of the submesh at (0, 0, 0) mesh.submesh.center = True # Scaling factor of the mesh mesh.scale = [2, 1, 1] # URI of the mesh mesh.uri = 'file://mesh.stl' print('Custom mesh') print(mesh) ``` #### Polyline ``` polyline = create_sdf_element('polyline') print('Default polyline - with default parameters') print(polyline) print('Default polyline - as dict') print(polyline.to_dict()) print('Default polyline - as SDF') print(polyline.to_xml_as_str()) print('Polyline with optional parameters') polyline.reset(with_optional_elements=True) print(polyline) # Set new height polyline.height = 2.3 # Customize point polyline.points[0].value = [2.3, 4.5] # Add new point polyline.add_point() # Set the coordinates of new point polyline.points[1].value = [3.7, 10.1] print('Custom polyline') print(polyline) ``` ### Creating a geometry entity ``` # Initially, the geometry is created with a <empty/> element geometry = create_sdf_element('geometry') print(geometry) # Creating a geometry for each of the basic forms # The geometry entity has a separate mode for each possible 
geometry form, since it cannot hold # multiple geometries # When a new geometry is set, the former one is deleted print('All the geometry entity modes:') print(geometry.get_modes()) geometry.box = box print(geometry) geometry.cylinder = cylinder print(geometry) geometry.sphere = sphere print(geometry) geometry.plane = plane print(geometry) geometry.image = image print(geometry) geometry.mesh = mesh print(geometry) geometry.polyline = polyline print(geometry) ```
github_jupyter
# Gym Crowdedness Analysis with PCA > # Objective : To **predict** how crowded a university gym would be at a given time of day (and some other features, including weather) > # Data Decription : The dataset consists of 26,000 people counts (about every 10 minutes) over one year. The dataset also contains information about the weather and semester-specific information that might affect how crowded it is. The label is the number of people, which has to be predicted given some subset of the features. **Label**: - Number of people **Features**: 1. date (string; datetime of data) 2. timestamp (int; number of seconds since beginning of day) 3. dayofweek (int; 0 [monday] - 6 [sunday]) 4. is_weekend (int; 0 or 1) [boolean, if 1, it's either saturday or sunday, otherwise 0] 5. is_holiday (int; 0 or 1) [boolean, if 1 it's a federal holiday, 0 otherwise] 6. temperature (float; degrees fahrenheit) 7. isstartof_semester (int; 0 or 1) [boolean, if 1 it's the beginning of a school semester, 0 otherwise] 8. month (int; 1 [jan] - 12 [dec]) 9. 
hour (int; 0 - 23) > # Approach The model would be built and PCA would be implemented in the following way : - **Data Cleaning and PreProcessing** - **Exploratory Data Analysis :** - Uni-Variate Analysis : Histograms , Distribution Plots - Bi-Variate Analysis : Pair Plots - Correlation Matrix - **Processing :** - OneHotEncoding - Feature Scaling : Standard Scaler - **Splitting Dataset** - **Principal Component Analysis** - **Modelling : Random Forest** - Random forest without PCA - Random Forest with PCA - **Conclusion** ## `1` Data Cleaning and PreProcessing **Importing Libraries and loading Dataset** ``` import numpy as np # linear algebra import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df=pd.read_csv(r'C:\Users\kusht\OneDrive\Desktop\Excel-csv\PCA analysis.csv') #Replace it with your path where the data file is stored df.head() ``` **TASK : Print the `info()` of the dataset** ``` ### START CODE HERE (~ 1 Line of code) df.info() ### END CODE ``` **TASK : Describe the dataset using `describe()`** ``` ### START CODE HERE (~ 1 Line of code) df.describe() ### END CODE ``` **TASK : Convert temperature in Fahrenheit into Celsius scale using the formula `Celsius=(Fahrenheit-32)* (5/9)`** ``` ### START CODE HERE (~1 Line of code) df.temperature = df.temperature.apply(lambda x : (5/9)*(x-32)) ### END CODE ``` **TASK : Convert the timestamp into hours in 12 h format as it's currently in seconds and drop `date` column** ``` ### START CODE HERE: (~ 1 Line of code) df.timestamp = abs((df.timestamp/3600) - 12) df.drop('date' , axis=1 , inplace=True) ### END CODE ``` ## `2` Exploratory Data Analysis ### `2.1` Uni-Variate and Bi-Variate Analysis - **Pair Plots** **TASK : Use `pairplot()` to make different pair scatter plots of the entire dataframe** ``` ### START CODE HERE : plt.figsize=(12,12) sns.pairplot(df) ### END CODE ``` **TASK: Now analyse scatter plots between `number_people` and all other attributes using a `for loop` to
properly know what are the ideal conditions for people to come to the gym** ``` ### START CODE HERE for i in df.columns : plt.figsize=(10,10) df.plot(kind='scatter' , x=i , y='number_people') ### END CODE ``` **Analyse the plots and understand :** 1. **At what time , temperature , week of the day more people come in?** Answer : During eve time around 6 pm -8 pm , on the first four days of the week when temperature is around 15-20 celsius , people prefer to come 2. **Whether people like to come to the gym in a holiday or a weekend or they prefer to come to gym during working days?** Answer : People prefer working days 3. **Which month is most preferable for people to come to the gym?** Answer: number of people keep decreasing from January to July. Then it increases and remains the highest in August , September , October only to decrease in November and December. June , July has the least number of people and August, September, October the highest - **Distribution Plots** **TASK : Plot individual `distplot()` for `temperature` and `number_people` to check out the individual distribution of the attributes** ``` ### START CODE HERE : plt.figure(figsize=(15,15)) sns.distplot(df.temperature,kde=True,rug=True) sns.distplot(df.number_people,kde=True,rug=True) ### END CODE ``` ### `2.2` Correlation Matrix **TASK : Plot a correlation matrix and make it more understandable using `sns.heatmap`** ``` ### START CODE HERE : sns.heatmap(df.corr()) ### END CODE HERE ``` **Analyse the correlation matrix and understand the different dependencies of attributes on each other** ## `3.` Processing : ### `3.1` One hot encoding : One hot encoding certain attributes to not give any ranking/priority to any instance **TASK: One Hot Encode following attributes `month` , `hour` , `day of week`** ``` ## YOU CAN USE EITHER get_dummies() OR OneHotEncoder() ### START CODE HERE : columns = ["day_of_week", "month", "hour"] df = pd.get_dummies(df, columns=columns) df.head() ### END CODE ``` ### `3.2`
Feature Scaling : Some attributes' ranges are very different compared to other values and during PCA implementation this might give a problem thus you need to standardise some of the attributes **TASK: Using `StandardScaler()` , standardise `temperature` and `timestamp`** ``` ## You can use two individual scalers one for temperature and other for timestamp ## you can use an array type data=df.values and standardise data then split data into X and y from sklearn.preprocessing import StandardScaler ### START CODE HERE : data=df.values scaler1 = StandardScaler() scaler1.fit(data[:, 1:2]) # for timestamp data[:,1:2] = scaler1.transform(data[:,1:2]) scaler2 = StandardScaler() scaler2.fit(data[:,4:5]) # for temperature data[:,4:5] = scaler2.transform(data[:,4:5]) ### END CODE HERE ``` ## `4.` Splitting the dataset : **TASK : Split the dataset into dependent and independent variables and name them y and X respectively** ``` ### START CODE HERE : X = data[:,1:] y=data[:,0] ### END CODE ``` **TASK : Split the X ,y into training and test set** ``` from sklearn.model_selection import train_test_split ### START CODE HERE : X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) ### END CODE ``` ## `5.` Principal Component Analysis Principal component analysis (PCA) is a technique for reducing the dimensionality of such datasets, increasing interpretability but at the same time minimizing information loss. It does so by creating new uncorrelated variables that successively maximize variance. **How does it work? :** - First, a matrix is calculated that summarizes how our variables all relate to one another. - Secondly , The matrix is broken down into two separate components: direction and magnitude. so it's easy to understand the “directions” of the data and its “magnitude” (or how “important” each direction is).
The photo below displays the two main directions in this data: the “red direction” and the “green direction.” In this case, the “red direction” is the more important one as given how the dots are arranged, “red direction” comprises most of the data and thus is more important than the “green direction” (Hint: What would fitting a line of best fit to this data look like?) <img src="https://miro.medium.com/max/832/1*P8_C9uk3ewpRDtevf9wVxg.png"> - Then the data is transformed to align with these important directions (which are combinations of our original variables). The photo below is the same exact data as above, but transformed so that the x- and y-axes are now the “red direction” and “green direction.” What would the line of best fit look like here? <img src="https://miro.medium.com/max/1400/1*V3JWBvxB92Uo116Bpxa3Tw.png"> So PCA tries to find the most important directions in which most of the data is spread and thus reduces it to those components thereby reducing the number of attributes to train and increasing computational speed.
A 3D example is given below : <img src="https://miro.medium.com/max/1024/1*vfLvJF8wHaQjDaWv6Mab2w.png"> As you can see above a 3D plot is reduced to a 2d plot still retaining most of the data **Now that you have understood this , let's try to implement it** **TASK : Print the PCA fit_transform of X(independent variables)** ``` from sklearn.decomposition import PCA ### START CODE HERE : pca = PCA() pca.fit_transform(X) ### END CODE ``` **TASK : Get covariance using `get_covariance()`** ``` ### START CODE HERE (~ 1 line of code) pca.get_covariance() ### END CODE HERE ``` **TASK : Get explained variance using `explained_variance_ratio`** ``` ### START CODE HERE : explained_variance=pca.explained_variance_ratio_ explained_variance ### END CODE ``` **TASK : Plot a bar graph of `explained variance`** ``` # you can use plt.bar() ### START CODE HERE : with plt.style.context('dark_background'): plt.figure(figsize=(15,12)) plt.bar(range(49), explained_variance, alpha=0.5, align='center', label='individual explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.legend(loc='best') plt.tight_layout() ### END CODE ``` **Analyse the plot and estimate how many components you want to keep** **TASK : Make a `PCA()` object with n_components =20 and fit-transform in the dataset (X) and assign to a new variable `X_new`** ``` ### START CODE HERE : pca=PCA(n_components=20) X_new=pca.fit_transform(X) X_new ### END CODE ``` Now , `X_new` is the dataset for PCA **TASK : Get Covariance using `get_covariance`** ``` ### START CODE HERE (~1 Line of code) pca.get_covariance() ### END CODE ``` **TASK : Get the explained variance using `explained_variance_ratio`** ``` ### START CODE HERE : explained_variance=pca.explained_variance_ratio_ explained_variance ### END CODE ``` **TASK : Plot bar plot of `explained variance`** ``` # You can use plt.bar() ### START CODE HERE: with plt.style.context('dark_background'): plt.figure(figsize=(6, 4))
plt.bar(range(20), explained_variance, alpha=0.5, align='center', label='individual explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.legend(loc='best') plt.tight_layout() ### END CODE ``` ## `6.` Modelling : Random Forest To understand Random forest classifier , lets first get a brief idea about Decision Trees in general. Decision Trees are very intuitive and at everyone have used this knowingly or unknowingly at some point . Basically the model keeps sorting them into categories forming a large tree by responses of some questons (decisions) and thats why its called decision tree. An image example would help understand it better : <img src="https://camo.githubusercontent.com/960e89743476577bd696b3ac16885cf1e1d19ad1/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f313030302f312a4c4d6f4a6d584373516c6369475445796f534e3339672e6a706567"> `Random Forest` : Random forest, like its name implies, consists of a large number of individual decision trees that operate as an [ensemble](https://en.wikipedia.org/wiki/Ensemble_learning) . Each individual tree in the random forest spits out a class prediction and the class with the most votes becomes our model’s prediction. <img src="https://camo.githubusercontent.com/30aec690ddc10fa0ae5d3135d0c7a6b745eb5918/68747470733a2f2f6d69726f2e6d656469756d2e636f6d2f6d61782f313030302f312a56484474566144504e657052676c49417637324246672e6a706567"> The fundamental concept is large number of relatively uncorrelated models (trees) operating as a committee will outperform any of the individual constituent models. Since this dataset has very low correlation between attributes , random forest can be a good option. 
In this section you'll have to make a random forest model and train it on both without PCA dataset and with PCA datset to analyse the differences ### `6.1` Random Forest Without PCA **TASK : Make a random forest model and train it on without PCA training set** ``` # Establish model from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() # Try different numbers of n_estimators and print the scores # You can use a variable estimators = np.arrange(10,200,10) and then a for loop to take all the values of estimators ### START CODE HERE estimators = np.arange(10, 200, 10) scores = [] for n in estimators: model.set_params(n_estimators=n) model.fit(X_train, y_train) scores.append(model.score(X_test, y_test)) print(scores) ### END CODE HERE ``` **TASK : Make a plot between `n_estimator` and `scores` to properly get the best number of estimators** ``` ## Use plt.plot ### START CODE HERE : plt.title("Effect of n_estimators") plt.xlabel("n_estimator") plt.ylabel("score") plt.plot(estimators, scores) ### END CODE HERE ``` ### `6.2` Random Forest With PCA **TASK : Split the your dataset with PCA into training and testing set** ``` from sklearn.model_selection import train_test_split ### START CODE HERE : X_train_pca, X_test_pca, y_train, y_test = train_test_split(X_new, y, test_size=0.2, random_state=1) X_train.shape ### END CODE ``` **TASK : Make a random forest model called `model_pca` and fit it into the new X_train and y_train and then print out the random forest scores for dataset with PCA applied to it** ``` # Establish model from sklearn.ensemble import RandomForestRegressor model_pca = RandomForestRegressor() # You can use different number of estimators # # You can use a variable estimators = np.arrange(10,200,10) and then a for loop to take all the values of estimators ### START CODE HERE : estimators = np.arange(10, 200, 10) scores_pca = [] for n in estimators: model_pca.set_params(n_estimators=n) model_pca.fit(X_train_pca, y_train) 
scores_pca.append(model_pca.score(X_test_pca, y_test)) print(scores_pca) ### END CODE ``` **TASK : Make a plot between `n_estimator` and `score` and find the best parameter** ``` # you can use plt.plot ### START CODE HERE : plt.title("Effect of n_estimators with PCA") plt.xlabel("n_estimator") plt.ylabel("scores") plt.plot(estimators, scores_pca) ### END CODE ``` This completes modelling and now it's time to analyse your models ## `7.` Conclusion Analyse the plots and find the best n_estimator. You can also hypertune other parameters using GridSearchCV or Randomised search. Also understand whether using PCA was beneficial or not , if not try to justify it.
github_jupyter
### does training on clinvar predict disease better than single mpc? - rasopathies (noonan syndrome) * rm testing data from clinvar ``` import pandas as pd import numpy from scipy.stats import entropy import pydot, pydotplus, graphviz import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns from sklearn import linear_model, metrics, tree, svm from sklearn.neural_network import MLPClassifier from sklearn.externals.six import StringIO from sklearn.preprocessing import PolynomialFeatures from sklearn.ensemble import ExtraTreesClassifier from IPython.display import HTML %matplotlib inline cols = ['mpc'] key_cols = ['chrom', 'pos', 'ref', 'alt'] ho = ['CorrectBenign', 'CorrectPath', 'WrongBenign', 'WrongPath'] right_benign = "#32CD32" right_path = "#2ecc71" wrong_path = "#e74c3c" wrong_benign = "#ffcccb" flatui = [right_benign, right_path, wrong_benign, wrong_path] def eval_pred(row, col): if row[col] == row['y']: if row['y'] == 1: return 'CorrectPath' return 'CorrectBenign' if row['y'] == 1: return 'WrongPath' return 'WrongBenign' def eval_mpc_raw(row): if row['y'] == 1: if row['mpc']>=float(2): return 'CorrectPath' return 'WrongPath' if row['mpc']>=float(2): return 'WrongBenign' return 'CorrectBenign' # load clinvar dat_file = '../data/interim/clinvar/clinvar.limit3.dat' clinvar_df_pre = pd.read_csv(dat_file, sep='\t').rename(columns={'clin_class':'y'}) # other disease df: missense dat_file = '../data/interim/other/other.eff.dbnsfp.anno.hHack.dat.limit.xls' disease_df_pre = pd.read_csv(dat_file, sep='\t') disease_df_pre.loc[:, 'y'] = disease_df_pre.apply(lambda row: 1 if row['class']=='P' else 0, axis=1) disease = 'Rasopathies' disease_df = disease_df_pre[disease_df_pre.Disease==disease] test_keys = {':'.join([str(x) for x in v]):True for v in disease_df[key_cols].values} #tree_clf = tree.DecisionTreeClassifier(max_depth=1) #X, y = train_df[cols], train_df['y'] #tree_clf.fit(X, y) crit = clinvar_df_pre.apply(lambda row: not ':'.join([str(row[x]) 
for x in key_cols]) in test_keys, axis=1) clinvar_df = clinvar_df_pre[crit] print('clinvar w/o testing data', len(clinvar_df)) disease_genes = set(disease_df['gene']) crit = clinvar_df.apply(lambda row: row['gene'] in disease_genes, axis=1) clinvar_df_limit_genes = clinvar_df[crit] print('clinvar w/o testing data for disease genes', len(clinvar_df_limit_genes)) disease_panel_gene_count = len(set(disease_df['gene'])) print(disease, disease_panel_gene_count) disease_df.groupby('y').size() gg = disease_df.groupby('y').size().reset_index().rename(columns={0:'size'}) list(gg[gg.y==0]['size'])[0] # train clinvar # apply mpc>2 tree_clf_clinvar = tree.DecisionTreeClassifier(max_depth=1) X, y = clinvar_df[cols], clinvar_df['y'] tree_clf_clinvar.fit(X, y) tree_clf_clinvar_limit_genes = tree.DecisionTreeClassifier(max_depth=1) X, y = clinvar_df_limit_genes[cols], clinvar_df_limit_genes['y'] tree_clf_clinvar_limit_genes.fit(X, y) # one gene at a time disease = 'Rasopathies' acc_df_ls = [] genes = set(disease_df['gene']) tree_clf_clinvar = tree.DecisionTreeClassifier(max_depth=1) X, y = clinvar_df[cols], clinvar_df['y'] tree_clf_clinvar.fit(X, y) for test_gene in genes: sub_train_df = disease_df[disease_df.gene != test_gene] tree_clf_sub = tree.DecisionTreeClassifier(max_depth=1) X, y = sub_train_df[cols], sub_train_df['y'] tree_clf_sub.fit(X, y) test_df = disease_df[disease_df.gene == test_gene] X_test = test_df[cols] preds = tree_clf_sub.predict(X_test) test_df['mpc_pred'] = preds test_df.loc[:, 'PredictionStatusMPC'] = test_df.apply(lambda row: eval_pred(row, 'mpc_pred'), axis=1) preds = tree_clf_clinvar.predict(X_test) test_df['mpc_pred_clinvar'] = preds test_df.loc[:, 'PredictionStatusMPC_clinvar'] = test_df.apply(lambda row: eval_pred(row, 'mpc_pred_clinvar'), axis=1) preds = tree_clf_clinvar_limit_genes.predict(X_test) test_df['mpc_pred_clinvar_limit_genes'] = preds test_df.loc[:, 'PredictionStatusMPC_clinvar_limit_genes'] = test_df.apply(lambda row: eval_pred(row, 
'mpc_pred_clinvar_limit_genes'), axis=1) # apply mpc>=2 test_df.loc[:, 'PredictionStatusMPC>2'] = test_df.apply(eval_mpc_raw, axis=1) acc_df_ls.append(test_df) test_df = pd.concat(acc_df_ls) g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC']] .groupby(['gene', 'PredictionStatusMPC']) .size().reset_index().rename(columns={0:'size'})) dd = g_df.groupby('gene').sum().reset_index() sns.set(font_scale=1.75) ss = sns.factorplot(x='gene', hue='PredictionStatusMPC', y='size', data=g_df, hue_order=ho, kind='bar', palette=sns.color_palette(flatui), size=5, aspect=3) ss.set_ylabels('%s CV panel missense variants' % (disease,)) ss.set_xlabels('') ss.set_titles('MPC performance') #ss.savefig("../docs/plots/%s_cv_mpc_eval.png" % (disease,)) g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC']] .groupby(['PredictionStatusMPC']) .size().reset_index().rename(columns={0:'size'})) g_df.head() g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC_clinvar']] .groupby(['gene', 'PredictionStatusMPC_clinvar']) .size().reset_index().rename(columns={0:'size'})) dd = g_df.groupby('gene').sum().reset_index() sns.set(font_scale=1.75) ss = sns.factorplot(x='gene', hue='PredictionStatusMPC_clinvar', y='size', data=g_df, hue_order=ho, kind='bar', palette=sns.color_palette(flatui), size=5, aspect=3) ss.set_ylabels('%s ClinVar-trained missense variants' % (disease,)) ss.set_xlabels('') ss.set_titles('MPC performance') #ss.savefig("../docs/plots/%s_cv_mpc_eval.png" % (disease,)) g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC_clinvar']] .groupby(['PredictionStatusMPC_clinvar']) .size().reset_index().rename(columns={0:'size'})) g_df.head() g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC_clinvar_limit_genes']] .groupby(['gene', 'PredictionStatusMPC_clinvar_limit_genes']) .size().reset_index().rename(columns={0:'size'})) dd = g_df.groupby('gene').sum().reset_index() 
sns.set(font_scale=1.75) ss = sns.factorplot(x='gene', hue='PredictionStatusMPC_clinvar_limit_genes', y='size', data=g_df, hue_order=ho, kind='bar', palette=sns.color_palette(flatui), size=5, aspect=3) ss.set_ylabels('%s ClinVar-trained (limit genes) missense variants' % (disease,)) ss.set_xlabels('') #ss.set_titles('MPC performance') #ss.savefig("../docs/plots/%s_cv_mpc_eval.png" % (disease,)) g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC_clinvar_limit_genes']] .groupby(['PredictionStatusMPC_clinvar_limit_genes']) .size().reset_index().rename(columns={0:'size'})) g_df.head() g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC>2']] .groupby(['gene', 'PredictionStatusMPC>2']) .size().reset_index().rename(columns={0:'size'})) dd = g_df.groupby('gene').sum().reset_index() sns.set(font_scale=1.75) ss = sns.factorplot(x='gene', hue='PredictionStatusMPC>2', y='size', data=g_df, hue_order=ho, kind='bar', palette=sns.color_palette(flatui), size=5, aspect=3) ss.set_ylabels('%s Not trained missense variants' % (disease,)) ss.set_xlabels('') ss.set_titles('MPC>2 performance') g_df = (test_df[['gene', 'chrom', 'pos', 'ref', 'alt', 'PredictionStatusMPC>2']] .groupby(['PredictionStatusMPC>2']) .size().reset_index().rename(columns={0:'size'})) g_df.head() g_df.values ```
github_jupyter
## Installieren aller Pakete ``` import warnings warnings.filterwarnings('ignore') import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' !pip3 install -r ../requirements.txt import torch from torch import nn from torchvision import transforms as T from PIL import Image import numpy as np from pathlib import Path from collections import deque import random, datetime, os, copy # Gym is an OpenAI toolkit for RL import gym from gym.spaces import Box from gym.wrappers import FrameStack # NES Emulator for OpenAI Gym from nes_py.wrappers import JoypadSpace # Super Mario environment for OpenAI Gym import gym_super_mario_bros import numpy as np from nes_py.wrappers import JoypadSpace import gym_super_mario_bros from time import sleep from gym_super_mario_bros.actions import SIMPLE_MOVEMENT import random, datetime import time from pathlib import Path import gym import gym_super_mario_bros from gym.wrappers import FrameStack, GrayScaleObservation, TransformObservation from nes_py.wrappers import JoypadSpace from utils.agent import Mario from utils.wrappers import ResizeObservation, SkipFrame import os os.environ['KMP_DUPLICATE_LIB_OK']='True' import random, datetime from pathlib import Path import gym import gym_super_mario_bros from gym.wrappers import FrameStack, GrayScaleObservation, TransformObservation from nes_py.wrappers import JoypadSpace class SkipFrame(gym.Wrapper): def __init__(self, env, skip): """Return only every `skip`-th frame""" super().__init__(env) self._skip = skip def step(self, action): """Repeat action, and sum reward""" total_reward = 0.0 done = False for i in range(self._skip): # Accumulate reward and repeat the same action obs, reward, done, info = self.env.step(action) total_reward += reward if done: break return obs, total_reward, done, info class GrayScaleObservation(gym.ObservationWrapper): def __init__(self, env): super().__init__(env) obs_shape = self.observation_space.shape[:2] self.observation_space = Box(low=0, high=255, shape=obs_shape, 
dtype=np.uint8) def permute_orientation(self, observation): # permute [H, W, C] array to [C, H, W] tensor observation = np.transpose(observation, (2, 0, 1)) observation = torch.tensor(observation.copy(), dtype=torch.float) return observation def observation(self, observation): observation = self.permute_orientation(observation) transform = T.Grayscale() observation = transform(observation) return observation class ResizeObservation(gym.ObservationWrapper): def __init__(self, env, shape): super().__init__(env) if isinstance(shape, int): self.shape = (shape, shape) else: self.shape = tuple(shape) obs_shape = self.shape + self.observation_space.shape[2:] self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8) def observation(self, observation): transforms = T.Compose( [T.Resize(self.shape), T.Normalize(0, 255)] ) observation = transforms(observation).squeeze(0) return observation class Mario: def __init__(self, state_dim, action_dim, save_dir): self.state_dim = state_dim self.action_dim = action_dim self.save_dir = save_dir self.use_cuda = torch.cuda.is_available() # Mario's DNN to predict the most optimal action - we implement this in the Learn section self.net = MarioNet(self.state_dim, self.action_dim).float() if self.use_cuda: self.net = self.net.to(device="cuda") self.exploration_rate = 1 self.exploration_rate_decay = 0.99999975 self.exploration_rate_min = 0.1 self.curr_step = 0 self.save_every = 5e5 # no. of experiences between saving Mario Net def act(self, state): """ Given a state, choose an epsilon-greedy action and update value of step. 
Inputs: state(LazyFrame): A single observation of the current state, dimension is (state_dim) Outputs: action_idx (int): An integer representing which action Mario will perform """ # EXPLORE if np.random.rand() < self.exploration_rate: action_idx = np.random.randint(self.action_dim) # EXPLOIT else: state = state.__array__() if self.use_cuda: state = torch.tensor(state).cuda() else: state = torch.tensor(state) state = state.unsqueeze(0) action_values = self.net(state, model="online") action_idx = torch.argmax(action_values, axis=1).item() # decrease exploration_rate self.exploration_rate *= self.exploration_rate_decay self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate) # increment step self.curr_step += 1 return action_idx class Mario(Mario): # subclassing for continuity def __init__(self, state_dim, action_dim, save_dir): super().__init__(state_dim, action_dim, save_dir) self.memory = deque(maxlen=100000) self.batch_size = 32 def cache(self, state, next_state, action, reward, done): """ Store the experience to self.memory (replay buffer) Inputs: state (LazyFrame), next_state (LazyFrame), action (int), reward (float), done(bool)) """ state = state.__array__() next_state = next_state.__array__() if self.use_cuda: state = torch.tensor(state).cuda() next_state = torch.tensor(next_state).cuda() action = torch.tensor([action]).cuda() reward = torch.tensor([reward]).cuda() done = torch.tensor([done]).cuda() else: state = torch.tensor(state) next_state = torch.tensor(next_state) action = torch.tensor([action]) reward = torch.tensor([reward]) done = torch.tensor([done]) self.memory.append((state, next_state, action, reward, done,)) def recall(self): """ Retrieve a batch of experiences from memory """ batch = random.sample(self.memory, self.batch_size) state, next_state, action, reward, done = map(torch.stack, zip(*batch)) return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze() class MarioNet(nn.Module): """mini cnn structure 
input -> (conv2d + relu) x 3 -> flatten -> (dense + relu) x 2 -> output """ def __init__(self, input_dim, output_dim): super().__init__() c, h, w = input_dim if h != 84: raise ValueError(f"Expecting input height: 84, got: {h}") if w != 84: raise ValueError(f"Expecting input width: 84, got: {w}") self.online = nn.Sequential( nn.Conv2d(in_channels=c, out_channels=32, kernel_size=8, stride=4), nn.ReLU(), nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1), nn.ReLU(), nn.Flatten(), nn.Linear(3136, 512), nn.ReLU(), nn.Linear(512, output_dim), ) self.target = copy.deepcopy(self.online) # Q_target parameters are frozen. for p in self.target.parameters(): p.requires_grad = False def forward(self, input, model): if model == "online": return self.online(input) elif model == "target": return self.target(input) class Mario(Mario): def __init__(self, state_dim, action_dim, save_dir): super().__init__(state_dim, action_dim, save_dir) self.gamma = 0.9 def td_estimate(self, state, action): current_Q = self.net(state, model="online")[ np.arange(0, self.batch_size), action ] # Q_online(s,a) return current_Q @torch.no_grad() def td_target(self, reward, next_state, done): next_state_Q = self.net(next_state, model="online") best_action = torch.argmax(next_state_Q, axis=1) next_Q = self.net(next_state, model="target")[ np.arange(0, self.batch_size), best_action ] return (reward + (1 - done.float()) * self.gamma * next_Q).float() class Mario(Mario): def __init__(self, state_dim, action_dim, save_dir): super().__init__(state_dim, action_dim, save_dir) self.optimizer = torch.optim.Adam(self.net.parameters(), lr=0.00025) self.loss_fn = torch.nn.SmoothL1Loss() def update_Q_online(self, td_estimate, td_target): loss = self.loss_fn(td_estimate, td_target) self.optimizer.zero_grad() loss.backward() self.optimizer.step() return loss.item() def sync_Q_target(self): 
self.net.target.load_state_dict(self.net.online.state_dict()) class Mario(Mario): def save(self): save_path = ( self.save_dir / f"mario_net_{int(self.curr_step // self.save_every)}.chkpt" ) torch.save( dict(model=self.net.state_dict(), exploration_rate=self.exploration_rate), save_path, ) print(f"MarioNet saved to {save_path} at step {self.curr_step}") def load(self, load_dir): save_path = ( self.save_dir / f"mario_net_{int(self.curr_step // self.save_every)}.chkpt" ) torch.save( dict(model=self.net.state_dict(), exploration_rate=self.exploration_rate), save_path, ) print(f"MarioNet saved to {save_path} at step {self.curr_step}") class Mario(Mario): def __init__(self, state_dim, action_dim, save_dir): super().__init__(state_dim, action_dim, save_dir) self.burnin = 1e4 # min. experiences before training self.learn_every = 3 # no. of experiences between updates to Q_online self.sync_every = 1e4 # no. of experiences between Q_target & Q_online sync def learn(self): if self.curr_step % self.sync_every == 0: self.sync_Q_target() if self.curr_step % self.save_every == 0: self.save() if self.curr_step < self.burnin: return None, None if self.curr_step % self.learn_every != 0: return None, None # Sample from memory state, next_state, action, reward, done = self.recall() # Get TD Estimate td_est = self.td_estimate(state, action) # Get TD Target td_tgt = self.td_target(reward, next_state, done) # Backpropagate loss through Q_online loss = self.update_Q_online(td_est, td_tgt) return (td_est.mean().item(), loss) import numpy as np import time, datetime import matplotlib.pyplot as plt class MetricLogger: def __init__(self, save_dir): self.save_log = save_dir / "log" with open(self.save_log, "w") as f: f.write( f"{'Episode':>8}{'Step':>8}{'Epsilon':>10}{'MeanReward':>15}" f"{'MeanLength':>15}{'MeanLoss':>15}{'MeanQValue':>15}" f"{'TimeDelta':>15}{'Time':>20}\n" ) self.ep_rewards_plot = save_dir / "reward_plot.jpg" self.ep_lengths_plot = save_dir / "length_plot.jpg" 
self.ep_avg_losses_plot = save_dir / "loss_plot.jpg" self.ep_avg_qs_plot = save_dir / "q_plot.jpg" # History metrics self.ep_rewards = [] self.ep_lengths = [] self.ep_avg_losses = [] self.ep_avg_qs = [] # Moving averages, added for every call to record() self.moving_avg_ep_rewards = [] self.moving_avg_ep_lengths = [] self.moving_avg_ep_avg_losses = [] self.moving_avg_ep_avg_qs = [] # Current episode metric self.init_episode() # Timing self.record_time = time.time() def log_step(self, reward, loss, q): self.curr_ep_reward += reward self.curr_ep_length += 1 if loss: self.curr_ep_loss += loss self.curr_ep_q += q self.curr_ep_loss_length += 1 def log_episode(self): "Mark end of episode" self.ep_rewards.append(self.curr_ep_reward) self.ep_lengths.append(self.curr_ep_length) if self.curr_ep_loss_length == 0: ep_avg_loss = 0 ep_avg_q = 0 else: ep_avg_loss = np.round(self.curr_ep_loss / self.curr_ep_loss_length, 5) ep_avg_q = np.round(self.curr_ep_q / self.curr_ep_loss_length, 5) self.ep_avg_losses.append(ep_avg_loss) self.ep_avg_qs.append(ep_avg_q) self.init_episode() def init_episode(self): self.curr_ep_reward = 0.0 self.curr_ep_length = 0 self.curr_ep_loss = 0.0 self.curr_ep_q = 0.0 self.curr_ep_loss_length = 0 def record(self, episode, epsilon, step): mean_ep_reward = np.round(np.mean(self.ep_rewards[-100:]), 3) mean_ep_length = np.round(np.mean(self.ep_lengths[-100:]), 3) mean_ep_loss = np.round(np.mean(self.ep_avg_losses[-100:]), 3) mean_ep_q = np.round(np.mean(self.ep_avg_qs[-100:]), 3) self.moving_avg_ep_rewards.append(mean_ep_reward) self.moving_avg_ep_lengths.append(mean_ep_length) self.moving_avg_ep_avg_losses.append(mean_ep_loss) self.moving_avg_ep_avg_qs.append(mean_ep_q) last_record_time = self.record_time self.record_time = time.time() time_since_last_record = np.round(self.record_time - last_record_time, 3) print( f"Episode {episode} - " f"Step {step} - " f"Epsilon {epsilon} - " f"Mean Reward {mean_ep_reward} - " f"Mean Length {mean_ep_length} - " f"Mean 
Loss {mean_ep_loss} - " f"Mean Q Value {mean_ep_q} - " f"Time Delta {time_since_last_record} - " f"Time {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}" ) with open(self.save_log, "a") as f: f.write( f"{episode:8d}{step:8d}{epsilon:10.3f}" f"{mean_ep_reward:15.3f}{mean_ep_length:15.3f}{mean_ep_loss:15.3f}{mean_ep_q:15.3f}" f"{time_since_last_record:15.3f}" f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n" ) for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]: plt.plot(getattr(self, f"moving_avg_{metric}")) plt.savefig(getattr(self, f"{metric}_plot")) plt.clf() ``` # Hot Topic: Reinforcement Learning <img width=70% src="../images/pawel-czerwinski-oNcFi_dYz_0-unsplash.jpg"> In diesem Exkurs werden wir eine kurze Einführung in Reinforcement Learning geben und anhand eines praktischen Beispiels die Funktionsweise kurz aufzeigen. Wir verwenden hierbei **OpenAI Gym**, einer Entwicklungsplatform für Reinforcement Learning. ## Wiederholung Reinforcement Learning <img width=70% src="../images/reinforcement.jpg"> Reinforcement Learning ist eines der vier Paradigmen im Machine Learning neben Supervised, Unsupervised und Semi-Supervised Learning. Im Reinforcement Learning agiert ein Agent mit Aktionen mit der Umwelt. Die Umwelt liefert wiederum Belohnungen und den aktuellen Stand der Umwelt als Beobachtung. Als Lernprozess versucht man die sogenannte Policy/Strategie des Agenten anzupassen und zu verbessern. Grundsätzlich unterscheidet man zwischen **Exploration** und **Exploitation** im Training. Einerseits will man bisherige Strategien verbessern, aber auch zu einem gewissen Teil neue Strategien austesten. Das wird meist mit einem Parameter spezifiziert. Zu Beginn ist die Exploration deutlich ausgeprägter als bei fortgeschrittenem Training. In dem Einsteiger Kurs "Künstliche Intelligenz und maschinelles Lernen für Einsteiger" sind wir in einer Einheit etwas genauer auf Reinforcement Learning eingegangen. 
[Link hier.](https://open.hpi.de/courses/kieinstieg2020/items/3OBaGkP33fILA2t8FwqiC1) ## OpenAI Gym Environment <img width=50% src="../images/openai-cover.png"> In unserer Einheit nutzen wir OpenAI Gym. OpenAI Gym ist ein Open Source Umgebung für die Entwicklung von Reinforcement Learning Modellen für das es eine Vielzahl sogenannter Environments gibt. Darunter sind Atari-Spiele, Nintendo-Spiele sowie Umgebungen mit Robotern, die es in der Realität zu kaufen gibt. Hier mal ein Ausschnitt mehrerer Umgebungen. <img width=70% src="../images/environments.jpg"> # SuperMario Bros. Umgebung Im ersten Schritt wollen wir uns einmal die Umgebung ansehen und auch graphisch ausgeben lassen. Wir haben uns hier für das bekannte Nintendo-Spiel SuperMario Bros. entschieden als Environment. Die Environment wird als [OpenSource Projekt](https://github.com/Kautenja/gym-super-mario-bros) von anderen EntwicklerInnen zur Verfügung gestellt. Im ersten Schritt definieren wir die Umgebung und anschließend führen für eine gewisse Anzahl von Schritten jeweils zufällige Aktionen aus. Hierbei findet noch kein Lernvorgang statt - wir wollen nur erst einmal die Umgebung zeigen. Um die Ausführung zu unterbrechen, kann man im Menü oben die aktuelle Zellenausführung stoppen. ``` env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0') env = JoypadSpace(env, SIMPLE_MOVEMENT) done = True for step in range(1000): if done: state = env.reset() state, reward, done, info = env.step(env.action_space.sample()) env.render() time.sleep(.05) env.close() ``` Für unser richtiges Reinforcement Learning Modell müssen wir allerdings noch weitere Schritte ausführen. Zunächst sehen wir uns einmal an, welche möglichen Actions wir zur Verfügung haben in dieser Environment. Wir werden das anschließend nur auf zwei Actions limitieren, um schneller zu trainieren. Wir verwenden dann nur noch **right** (walk right) und **right, A** (jump right). 
``` env = gym_super_mario_bros.make("SuperMarioBros-1-1-v0") gym_super_mario_bros.actions.COMPLEX_MOVEMENT env = JoypadSpace(env, [["right"], ["right", "A"]]) ``` Wollen wir erfahren, was wir von unserer Environment als Informationen bekommen, so geben wir uns einmal die Rückgabewerte der Umgebung aus. Hier sieht man jedoch, dass wir nur relativ wenige Informationen direkt bekommen. Um unser Modell richtig trainieren zu können, müssen wir den aktuellen Stand eines Spiels jeweils als Bild erhalten. Unser Modell lernt anhand der Bilder des aktuellen Spielstandes. Dabei werden sogenannte Convolutional Neural Networks genutzt, die wir in Woche 4 etwas genauer betrachten werden. ``` env.reset() next_state, reward, done, info = env.step(action=0) print("Next-State: " + str(next_state.shape)) print("Reward: " + str(reward)) print("Done: " + str(done)) print("Info: " + str(info)) ``` Damit wir Feedback der Environment auch in Form von "Bildern" bekommen, müssen wir folgendes tun. ``` env = SkipFrame(env, skip=4) env = GrayScaleObservation(env) env = ResizeObservation(env, shape=84) env = TransformObservation(env, f=lambda x: x / 255.) env = FrameStack(env, num_stack=4) ``` # Q-Learning Im Folgenden werden wir den Q-Learning Ansatz verwenden. Beim Q-Learning kann der Agent die Belohnungen der Umgebung nutzen, um im Laufe der Zeit zu lernen, welche Aktion in einem bestimmten Zustand der Environment am besten ist. Weiter werden wir hier nicht ins Detail gehen. Die Implementierung und finales Modell stammt von [YuansongFeng](https://github.com/YuansongFeng/MadMario) und einem [Pytorch-Tutorial](https://pytorch.org/tutorials/intermediate/mario_rl_tutorial.html). # Die ersten Schritte beim Lernen Wir wollen uns zunächst einmal ansehen, wie unser Modell sich ohne Training schlägt. Anschließend werden wir mehrere bereits trainierte Modell laden und uns verschieden gute Modell ansehen. 
In der Präsentation werden wir das nicht zeigen, doch wir haben in den Notebooks auch den Code zum Training der einzelnen Modelle. Da das sehr zeitintensiv ist, werden wir nur bereits trainierte Modelle verwenden. Das Training geschieht in sogenannten Episoden. Das Training von 1.000 Episoden kann bereits mehrere Stunden dauern. Für ein "wirklich gutes" Modell benötigt man mehrere zehntausend Episoden Training. ``` mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir="") episodes = 100 for e in range(episodes): state = env.reset() while True: env.render() time.sleep(.05) action = mario.act(state) # Agent führt Aktion aus und bekommt dafür Feedback der Environment next_state, reward, done, info = env.step(action) # Mario merkt sich den aktuellen Stand mario.cache(state, next_state, action, reward, done) # Mario führt einen Lernschritt aus q, loss = mario.learn() state = next_state if done or info["flag_get"]: break ``` # Training 100 Episodes ``` save_dir = Path("checkpoints_100") / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") save_dir.mkdir(parents=True) mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir=save_dir) logger = MetricLogger(save_dir) episodes = 100 for e in range(episodes): if e % 10 == 0: print("Episode: " + str(e)) state = env.reset() # Play the game! 
while True: # Run agent on the state action = mario.act(state) # Agent performs action next_state, reward, done, info = env.step(action) # Remember mario.cache(state, next_state, action, reward, done) # Learn q, loss = mario.learn() # Logging logger.log_step(reward, loss, q) # Update state state = next_state # Check if end of game if done or info["flag_get"]: break logger.log_episode() if e % 10 == 0: logger.record(episode=e, epsilon=mario.exploration_rate, step=mario.curr_step) mario.save() ``` # Training 1000 Episodes ``` # Start Training save_dir = Path("checkpoints_1000") / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") save_dir.mkdir(parents=True) mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir=save_dir) logger = MetricLogger(save_dir) episodes = 1000 for e in range(episodes): if e % 100 == 0: print("Episode: " + str(e)) state = env.reset() # Play the game! while True: # Run agent on the state action = mario.act(state) # Agent performs action next_state, reward, done, info = env.step(action) # Remember mario.cache(state, next_state, action, reward, done) # Learn q, loss = mario.learn() # Update state state = next_state # Check if end of game if done or info["flag_get"]: break if e % 100 == 0: logger.record(episode=e, epsilon=mario.exploration_rate, step=mario.curr_step) mario.save() ``` # Training 10.000 Episodes ``` save_dir = Path("checkpoints_10000") / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") save_dir.mkdir(parents=True) mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir=save_dir) logger = MetricLogger(save_dir) episodes = 10000 for e in range(episodes): if e % 10000 == 0: print("Episode: " + str(e)) state = env.reset() # Play the game! 
while True: # Run agent on the state action = mario.act(state) # Agent performs action next_state, reward, done, info = env.step(action) # Remember mario.cache(state, next_state, action, reward, done) # Learn q, loss = mario.learn() # Logging logger.log_step(reward, loss, q) # Update state state = next_state # Check if end of game if done or info["flag_get"]: break logger.log_episode() if e % 10000 == 0: logger.record(episode=e, epsilon=mario.exploration_rate, step=mario.curr_step) model.save() ``` # Trainierte Modelle Nachdem wir uns unseren Agenten einmal komplett ohne Training angesehen haben, so wollen wir uns einmal das Verhalten nach 100 Episoden ansehen. Vorher muss zuerst allerdings die Environment zurückgesetzt werden, dass wir die Modelle laden können. ``` import gym import gym_super_mario_bros from gym.wrappers import FrameStack, GrayScaleObservation, TransformObservation from nes_py.wrappers import JoypadSpace from utils.agent import Mario from utils.wrappers import ResizeObservation, SkipFrame # Initialize Super Mario environment env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0') # Limit the action-space to # 0. walk right # 1. jump right env = JoypadSpace( env, [['right'], ['right', 'A']] ) # Apply Wrappers to environment env = SkipFrame(env, skip=4) env = GrayScaleObservation(env, keep_dim=False) env = ResizeObservation(env, shape=84) env = TransformObservation(env, f=lambda x: x / 255.) 
env = FrameStack(env, num_stack=4) env.reset() ``` # Modell mit 100 Episoden Training ``` checkpoint = Path('models/mario_100ep.chkpt') mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir="", checkpoint=checkpoint) mario.exploration_rate = mario.exploration_rate_min episodes = 100 for e in range(episodes): state = env.reset() while True: env.render() time.sleep(.05) action = mario.act(state) next_state, reward, done, info = env.step(action) mario.cache(state, next_state, action, reward, done) state = next_state if done or info['flag_get']: break ``` # Modell mit 1000 Episoden Training Nachdem unser Modell mit 100 Episoden noch keine großartigen Fortschritte zu verzeichnen hat, wollen wir uns mal das Modell nach circa 1000 Episoden ansehen. ``` checkpoint = Path('models/mario_1000ep.chkpt') mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir="", checkpoint=checkpoint) episodes = 100 for e in range(episodes): state = env.reset() while True: env.render() time.sleep(.05) action = mario.act(state) next_state, reward, done, info = env.step(action) mario.cache(state, next_state, action, reward, done) state = next_state if done or info['flag_get']: break ``` # Fertiges Modell Wenn wir nun uns ein "fertiges" Modell einmal ansehen, sehen wir deutlich besseres Verhalten und höheren Reward (Erreichen des Ziels, Münzen, kürzere Zeiten etc.). Das hier gezeigte Modell wurde für mehrere zehntausend Episoden trainiert - ist aber noch keinesfalls perfekt. Natürlich trainieren wir hier auf sehr einfachen Levels des Spiels. Zudem haben wir zu Beginn die Aktionen nur auf zwei beschränkt. Gibt man dem Agenten mehrere Aktionen zur Auswahl und mehr Episoden im Training erhalten wir sicherlich noch ein besseres Modell. 
``` checkpoint = Path('models/mario_trained.chkpt') mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir="", checkpoint=checkpoint) episodes = 100 for e in range(episodes): state = env.reset() while True: env.render() time.sleep(.05) action = mario.act(state) next_state, reward, done, info = env.step(action) mario.cache(state, next_state, action, reward, done) state = next_state if done or info['flag_get']: break ``` Das war es zum Exkurs Reinforcement Learning. Falls Sie Reinforcement Learning oder einige der Anwendungsfälle interessieren, so können Sie mit https://gym.openai.com/ sehr einfach damit starten. Viel Spaß beim selbst ausprobieren.
github_jupyter
# Summary of the extracted data This notebook contains the code to perform a summary of the initial tweets extracted. It includes: - number of user - number of tweets per user (average) - barplot with tweets per year and category - pie chart with % of tweets per category **Note that to obtain these numbers and perform the previous graphics we will use the "semiclean" data, tweets after all the filtering steps but previous to removing the tweets that refer to more than one category.** Additionally, and based on external information, we also create a barplot with the tweets in english per year. ``` ##################### # Load Libraries # ##################### #install.packages(c("ggplot2", "dplyr", "scales", "viridis") ) install.packages("Kendall") install.packages("wordcloud") install.packages("tm") install.packages("slam") library("Kendall") library("ggplot2") library("dplyr") library("scales") library("viridis") library("reshape2") library("wordcloud") library("tm") library("slam") rm(list=ls()) ``` First, we read all the files in the "SemiCleanAndAggregateTweets" and we put all of them together. ``` ################################# # TWEETS PER YEAR AND CATEGORY # ################################ fileList <- list.files("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", pattern = "_SemiCleanTweets.txt") for( i in 1:length( fileList ) ){ if( i == 1 ){ totalSemiCleanTweets <- read.delim(paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", fileList[i])) }else{ intermediateFile <- read.delim(paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", fileList[i])) totalSemiCleanTweets <- rbind( totalSemiCleanTweets, intermediateFile ) } } dim(totalSemiCleanTweets) ``` Then, we estimate the total number of users and the tweets per user. 
``` ############################################### ## Estimate total users and tweets per user ## ############################################## length(unique(totalSemiCleanTweets$username)) tweetsPerUser <- as.data.frame( table( totalSemiCleanTweets$username)) summary(tweetsPerUser$Freq) head(tweetsPerUser) sum(tweetsPerUser$Freq>100) ``` ## Tweets per category and year We read all the tweets in the "SemiCleanAndAggregateTweets" folder and we extract for each tweet the year when it was published and we re-write the files to save the information. ``` fileList <- list.files("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", pattern = "_SemiCleanTweets.txt") for( i in 1:length( fileList ) ){ allInfo <- read.delim(paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", fileList[i])) allInfo <- allInfo[, c("text", "date")] allInfo$year <- sapply(strsplit( as.character(allInfo$date), "-"), '[', 1) allInfo$yearMonth <- paste0( sapply(strsplit( as.character(allInfo$date), "-"), '[', 1), "-", sapply(strsplit( as.character(allInfo$date), "-"), '[', 2) ) years <- sort(unique( allInfo$year)) results <- as.data.frame( matrix( ncol = 2, nrow= length( years ))) colnames(results) <- c("year", "totalTweets") results$year <- years for( j in 1:nrow(results)){ results$totalTweets[j] <- nrow( allInfo[ allInfo$year == results$year[j], ] ) } results$category <- gsub( "_SemiCleanTweets.txt", "", fileList[i]) write.table( results, file= paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", results$category[1],"_tweetsPerYear.txt"), col.names = TRUE, row.names = FALSE, sep = "\t", quote = FALSE) } ``` Then, we read the new files generated with the year and put all of them together, creating at the end a table with number of tweets per category and year. 
``` fileList <- list.files("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", pattern = "_tweetsPerYear.txt") fileList for( i in 1:length( fileList ) ){ if( i == 1){ allInfo <- read.delim(paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", fileList[i])) }else{ newFile <- read.delim(paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", fileList[i])) allInfo <- rbind( allInfo, newFile ) } } byYearTweets <- allInfo[ allInfo$year != "Total", ] dataSet <- byYearTweets[, c("year", "totalTweets", "category")] dataSet$year <- as.factor( dataSet$year) ``` With this data generated, we create a stacked barplot showing tweets per each BC category and per year. ``` ############################################################ # Stacked barplot with tweets of each BC category per year # ############################################################ barTotalByYear <- ggplot(dataSet, aes(fill=category, y=totalTweets, x=year)) + geom_bar(position="stack", stat="identity") + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))+ ggtitle("Number of Tweets mentioning each Contraceptive Class per Year") + xlab("Year") + ylab( "Number of Tweets") + scale_fill_viridis_d() barTotalByYear ############################################################ # Stacked barplot with tweets of each BC category per year 2 # ############################################################ barTotalByYear2 <- ggplot(dataSet, aes(fill=category, y=totalTweets, x=year)) + geom_bar(position="stack", stat="identity") + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))+ ggtitle("Number of Tweets mentioning each Contraceptive Class per Year") + xlab("Year") + ylab( "Number of Tweets") barTotalByYear2 ``` ## Summary table for semi clean tweets ``` #first we get all the files names 
fileList <- list.files("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", pattern = "_SemiCleanTweets.txt") #then we create a table to save the summary that we want to generate summaryTable <- as.data.frame( matrix( ncol = 10, nrow = 16)) colnames(summaryTable) <- c("Year", "IUD_tweets", "LNG-IUD_tweets", "copperIUD_tweets", "Implant_tweets", "Patch_tweets", "Pill_tweets", "Ring_tweets", "Shot_tweets", "TotalTweetsPerYear") summaryTable$Year <- c("2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "totalTweetsPerCategory", "MannKendall_pVal", "MannKendall_tau") #then with a for loop, we estimates the counts for each category and we fill the summary table categories <- gsub("_SemiCleanTweets.txt", "", fileList) for( i in 1:length( categories ) ){ columnToFill <- which( colnames(summaryTable)== paste0(categories[i], "_tweets" )) allInfo <- read.delim(paste0( "/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/", categories[i], "_SemiCleanTweets.txt")) allInfo <- allInfo[, c("text", "date")] allInfo$year <- sapply(strsplit( as.character(allInfo$date), "-"), '[', 1) summaryTable[ summaryTable$Year == "2007", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2007, ])) summaryTable[ summaryTable$Year == "2008", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2008, ])) summaryTable[ summaryTable$Year == "2009", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2009, ])) summaryTable[ summaryTable$Year == "2010", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2010, ])) summaryTable[ summaryTable$Year == "2011", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2011, ])) summaryTable[ summaryTable$Year == "2012", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2012, ])) summaryTable[ summaryTable$Year == "2013", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2013, ])) summaryTable[ summaryTable$Year == "2014", columnToFill] <- 
as.numeric(nrow(allInfo[ allInfo$year == 2014, ])) summaryTable[ summaryTable$Year == "2015", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2015, ])) summaryTable[ summaryTable$Year == "2016", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2016, ])) summaryTable[ summaryTable$Year == "2017", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2017, ])) summaryTable[ summaryTable$Year == "2018", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2018, ])) summaryTable[ summaryTable$Year == "2019", columnToFill] <- as.numeric(nrow(allInfo[ allInfo$year == 2019, ])) summaryTable[ summaryTable$Year == "totalTweetsPerCategory", columnToFill] <- nrow(allInfo) summaryTable[ summaryTable$Year == "MannKendall_tau", columnToFill] <- round( MannKendall(summaryTable[1:13, columnToFill])$tau, 3 ) summaryTable[ summaryTable$Year == "MannKendall_pVal", columnToFill] <- formatC( as.numeric( MannKendall(summaryTable[1:13, columnToFill])$sl ), digits = 3 ) } #change characters to numerics to estimate the total per year for( i in 1:14){ summaryTable$TotalTweetsPerYear[i] <- sum( as.numeric( summaryTable[i, c(2:9)])) } summaryTable ``` ### Mann Kendall tests for trends in number of tweets per class over time ``` ######################################################################### # Mann Kendall tests for trends in number of tweets per class over time # ######################################################################### #IUD mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/IUD_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the IUD over time") MannKendall(mk$totalTweets) rm(mk) #Copper IUD mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/copperIUD_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names 
= NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the Copper IUD over time") MannKendall(mk$totalTweets) rm(mk) #LNG-IUD mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/LNG-IUD_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the LNG-IUD over time") MannKendall(mk$totalTweets) rm(mk) #Implant mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/Implant_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the implant over time") MannKendall(mk$totalTweets) rm(mk) #Pill mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/Pill_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the pill over time") MannKendall(mk$totalTweets) rm(mk) #Patch mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/Patch_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the patch over time") MannKendall(mk$totalTweets) rm(mk) #Ring mk <- read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/Ring_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the ring over time") MannKendall(mk$totalTweets) rm(mk) #Shot mk <- 
read.delim("/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/Shot_tweetsPerYear.txt") mk <- mk[, c("year", "totalTweets", "category")] as.data.frame(mk, row.names = NULL, col.names = TRUE,) mk <- mk[,-3] paste("Mann Kendall test for trend in number of tweets mentioning the shot over time") MannKendall(mk$totalTweets) rm(mk) #LARC mk <- c(16, 346, 1008, 10357, 19802, 31063, 29951, 36470, 35684, 42922, 34383, 75213, 119656) MannKendall(mk) rm(mk) #SARC mk <- c(19, 313, 5055, 16030, 30967, 43721, 33416, 28953, 22446, 24619, 24291, 33412, 44511) MannKendall(mk) rm(mk) ``` And we can create a stacked bar plot adusted for the total number of tweets about contraception per year ``` categories <- c("IUD", "LNG-IUD", "copperIUD", "Implant", "Patch", "Ring", "Shot", "Pill") for( i in 1:length(categories ) ){ if( i ==1 ){ selection <- read.delim( paste0( '/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/', categories[i], "_SemiCleanTweets.txt"), header = TRUE) selection <- selection[, c("text", "date")] selection$year <- sapply(strsplit( as.character(selection$date), "-"), '[', 1) selection$yearMonth <- paste0( sapply(strsplit( as.character(selection$date), "-"), '[', 1), "-", sapply(strsplit( as.character(selection$date), "-"), '[', 2) ) colnames(selection)[1] <- "tweets" selection$category <- categories[i] }else{ selectionInt <- read.delim( paste0( '/home/ec2-user/SageMaker/SemiCleanAndAggregateTweets/', categories[i], "_SemiCleanTweets.txt"), header = TRUE) selectionInt <- selectionInt[, c("text", "date")] selectionInt$year <- sapply(strsplit( as.character(selectionInt$date), "-"), '[', 1) selectionInt$yearMonth <- paste0( sapply(strsplit( as.character(selectionInt$date), "-"), '[', 1), "-", sapply(strsplit( as.character(selectionInt$date), "-"), '[', 2) ) colnames(selectionInt)[1] <- "tweets" selectionInt$category <- categories[i] selection <- rbind( selection, selectionInt) } } ######################################## ## Function to create adjusted barplot # 
######################################## adjustedBarplot <- function( input, title ){ ggplot(input, aes(x=as.factor(year), fill=as.factor(category)))+ geom_bar(aes( y=..count../tapply(..count.., ..x.. ,sum)[..x..]), position="stack" ) + scale_y_continuous(labels = scales::percent)+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))+ ggtitle( title ) + xlab("Year" ) + ylab( "% of Tweets")+ scale_fill_viridis_d() } adjustedBarplot( input = selection, title = "Tweets Mentioning Each Contraceptive Class per Year (Adjusted)" ) ######################################## ## Function to create adjusted barplot 2# ######################################## adjustedBarplot2 <- function( input, title ){ ggplot(input, aes(x=as.factor(year), fill=as.factor(category)))+ geom_bar(aes( y=..count../tapply(..count.., ..x.. ,sum)[..x..]), position="stack" ) + scale_y_continuous(labels = scales::percent)+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))+ ggtitle( title ) + xlab("Year" ) + ylab( "% of Tweets") } adjustedBarplot2( input = selection, title = "Tweets Mentioning Each Contraceptive Class per Year (Adjusted)" ) ``` ## Tweets per category And we also create a general pie chart with the percentage of tweets per category. 
``` ############################# ###TOTAL TWEETS BY CATEGORY # ############################# totalData <- as.data.frame( matrix( ncol=2, nrow=8)) colnames(totalData) <- c("category", "totalTweets") totalData$category <- unique( dataSet$category) for( i in 1:nrow( totalData ) ){ selection <- dataSet[ dataSet$category == totalData$category[i], ] totalData$totalTweets[i] <- sum(selection$totalTweets) } totalData %>% arrange(desc(totalTweets)) %>% mutate(prop = scales::percent(totalTweets / sum(totalTweets))) -> totalData totalData ggplot(totalData, aes(x="", y=totalTweets, fill=category) )+ geom_bar(width = 1, stat = "identity") + coord_polar(theta = "y") + geom_text(aes(label = prop), position = position_stack(vjust = 0.5)) + theme(axis.text = element_blank(), axis.ticks = element_blank(), panel.grid = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank()) ``` ## Alternative using pie donut chart, to agreggate in the pie LARC and SARC ``` install.packages("rlang") if(!require(devtools)) install.packages("devtools") devtools::install_github("cardiomoon/moonBook") devtools::install_github("cardiomoon/webr", dependencies = TRUE) fileList <- list.files("/home/ec2-user/SageMaker/CleanAndAggregateTweets/") for( i in 1:length(fileList)){ if(i ==1){ all <- read.delim(paste0( "/home/ec2-user/SageMaker/CleanAndAggregateTweets/", fileList[i])) all$Category <- gsub("_CleanTweets.txt", "", fileList[i]) if( gsub("_CleanTweets.txt", "", fileList[i]) %in% c("copperIUD","Implant", "IUD", "LNG-IUD" )){ all$BigCategory <- "LARC" }else{ all$BigCategory <- "Non-LARC" } all <- all[, c("Category", "BigCategory")] }else{ new <- read.delim(paste0( "/home/ec2-user/SageMaker/CleanAndAggregateTweets/", fileList[i])) new$Category <- gsub("_CleanTweets.txt", "", fileList[i]) if( gsub("_CleanTweets.txt", "", fileList[i]) %in% c("copperIUD","Implant", "IUD", "LNG-IUD" )){ new$BigCategory <- "LARC" }else{ new$BigCategory <- 
"Non-LARC" } new <- new[, c("Category", "BigCategory")] all <- rbind(all, new) } } write.table(all, file="/home/ec2-user/SageMaker/allForPieDonut.txt", col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t") #the PieDonut function does not work here yet because the moonBook library cannot be installed in this R version #but I run it locally on my laptopt colnames(all)[2] <- "Tweets" #to sort it as we want we change the label names: all$Category <- gsub( "copperIUD", "1.Copper", all$Category) all$Category <- gsub( "LNG-IUD", "2.LNG", all$Category) all$Category <- gsub( "Implant", "3.Implant", all$Category) all$Category <- gsub( "IUD", "0.IUD", all$Category) PieDonut(all,aes(Tweets,Category),explode=1,explodeDonut=FALSE) ``` ![Pie Donut](pieDonut.png) ## Tweets published in Twitter in english per year We read a file with the information extracted from different external sources. ``` ######################### ### TWEETS IN ENGLISH ### ######################### tweetPerYear <- read.csv("/home/ec2-user/SageMaker/tweetsPerYear/tweets_per_yearUpdated.csv", header = TRUE) tweetPerYear <- tweetPerYear[, c("year", "adjusted_bc_tweets")] tweetPerYear$yearOnly <- sapply(strsplit( as.character(tweetPerYear$year), " - "), '[', 1) #extract the year tweetPerYear$totalNumbers <- sapply(strsplit( as.character(tweetPerYear$year), " - "), '[', 2) #extract the total number tweetPerYear$totalNumbersSN <- formatC( as.numeric( tweetPerYear$totalNumbers ), digits = 3 ) #put the adjusted number in scientific format tweetPerYear <- tweetPerYear[, c("yearOnly", "totalNumbersSN", "adjusted_bc_tweets")] #select the columns of interest #create a lable for the graphic with the year and the total number tweetPerYear$xLabel <- paste0( tweetPerYear$yearOnly, "\n (", tweetPerYear$totalNumbersSN, ")") tweetPerYear ``` Then we create a barplot that allows us to visualize the previous information. 
``` ggplot(tweetPerYear, aes(y=adjusted_bc_tweets, x=xLabel)) + geom_bar(stat="identity", fill = "darkorchid4") + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))+ ggtitle("Estimated percent of total number English language tweets \n per year about contraception") + xlab("" ) + ylab( "% of all English language tweets about birth control")+ theme(axis.text.x = element_text(angle = 45, hjust =1, vjust = 0.5)) ggplot(tweetPerYear, aes(y=adjusted_bc_tweets, x=yearOnly)) + geom_bar(stat="identity", fill = "darkorchid4", width = 0.95) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))+ ggtitle("Estimated percent of total number English language tweets per year \n about contraception") + xlab("" ) + ylab( "% of all English language tweets about birth control")+ theme(axis.text.x = element_text(angle = 45, hjust =1, vjust = 0.5)) install.packages("tm") install.packages("slam") library("tm") library("slam") install.packages("wordcloud") library(wordcloud) install.packages("RColorBrewer") library(RColorBrewer) install.packages("wordcloud2") library(wordcloud2) df <- totalSemiCleanTweets$text wordcloud(words = df, min.freq = 10, max.words=500, random.order=FALSE, rot.per=0.35, colors=brewer.pal(30, "Dark2")) sessionInfo() ```
github_jupyter
## Author : Syed Arsalan Amin ## Data Science and Business Intelligence Internship - The Sparks Foundation ### Task-2 : Prediction using Unsupervised ML (K-means clustering) From the given ‘Iris’ dataset, predict the optimum number of clusters and represent it visually. #### Github repository : [DataScience-and-Business-Intelligence](https://github.com/SyedArsalanAmin/DataScience-and-Business-Intelligence) #### Download dataset : [Iris Dataset](https://drive.google.com/file/d/11Iq7YvbWZbt8VXjfm06brx66b10YiwK-/view) ## Importing libraries ``` import numpy as np from sklearn.preprocessing import MinMaxScaler from sklearn.cluster import KMeans import matplotlib.pyplot as plt import pandas as pd import seaborn as sns sns.set() ``` ## Loading iris dataset and visualizing the dataframe ``` # importing data df = pd.read_csv("E:\DataScience & AI\Github_repo\datasets\Iris.csv") df.head() df = df.drop(columns=['Species', 'Id']) #dropping the 'Species' and 'Id' columns. df.head() def scatter_plot(dataset, col1, col2): plt.scatter(dataset.iloc[:, col1], dataset.iloc[:, col2]) plt.xlabel("Lenght") plt.ylabel("Width") plt.title("Petal/Sepal anaylysis") scatter_plot(df, 2, 3) # visualizing petal data scatter_plot(df, 0, 1) # visualizing sepal data df.describe() # looking into the data for insights ``` #### For a better understading of the data let's take a look at the correlation between different multidimentional features ``` sns.pairplot(df) ``` ## Normalizing dataset ``` scaler = MinMaxScaler() scaled_features = scaler.fit_transform(df) # scaling dataframe scaled_features.shape scaled_features[:3] # these are the normalized feature set between 0-1 ``` ## Predicting suitable no. of cluster using Elbow method From thr following plot you can see clearly that there is not significant decrease in the cost so we should take 3 as the no. of cluster. ``` # Using Elbow method to predict the no. 
of clusters def elbow(): cost = [] for i in range(1, 11): kmeans = KMeans(n_clusters=i) kmeans.fit_predict(scaled_features) cost.append(kmeans.inertia_) plt.plot(np.arange(0, 10), cost, marker='o') plt.title("Elbow Method") plt.xlabel("No. of Clusters") plt.ylabel("Cost Function") elbow() # kmeans to preict the category of cluster each iris belong to kmeans = KMeans(n_clusters=3) y_pred = kmeans.fit_predict(scaled_features) y_pred # so thses are the predicted categories of the data we provided to kmeans ``` ## Updating dataset with the normalized values ``` # Making normalized dataset df["SepalLengthCm"] = scaled_features[:, 0] df["SepalWidthCm"] = scaled_features[:, 1] df["PetalLengthCm"] = scaled_features[:, 2] df["PetalWidthCm"] = scaled_features[:, 3] df["Clusters"] = y_pred df.head() # Normalized dataset ``` ## Storing clusters in variables ``` # Making Petal Clusters pet_cluster1 = df[df['Clusters'] == 0].reset_index(drop=True) pet_cluster1.head(3) pet_cluster2 = df[df['Clusters'] == 1].reset_index(drop=True) pet_cluster3 = df[df['Clusters'] == 2].reset_index(drop=True) # Making Sepal Clusters sep_cluster1 = df[df['Clusters'] == 0].reset_index(drop=True) sep_cluster2 = df[df['Clusters'] == 1].reset_index(drop=True) sep_cluster3 = df[df['Clusters'] == 2].reset_index(drop=True) ``` ## Visualizing Clusters ``` # Plotting clusters def plot_sep_cluster(): plt.figure(figsize=(15, 7)) plt.scatter(sep_cluster1.iloc[:, 2], sep_cluster1.iloc[:, 3], c='r', marker='o', edgecolors='black', label="Cluster-1") plt.scatter(sep_cluster2.iloc[:, 2], sep_cluster2.iloc[:, 3], c='b', marker='v', edgecolors='black', label="Cluster-2") plt.scatter(sep_cluster3.iloc[:, 2], sep_cluster3.iloc[:, 3], c='y', marker='s', edgecolors='black', label="Cluster-3") centers = kmeans.cluster_centers_[:, -2:] # cluster center for petals plt.scatter(centers[:, 0], centers[:, 1], c='black', marker='X', s=200, label="Centroids") plt.xlabel('Length(cm)') plt.ylabel('Width(cm)') plt.legend() 
plt.title("Sepal Cluster Anaylysis") plt.show() plot_sep_cluster() def plot_pet_cluster(): plt.figure(figsize=(15, 7)) plt.scatter(pet_cluster1.iloc[:, 0], pet_cluster1.iloc[:, 1], c='r', marker='o', edgecolors='black', label="Cluster-1") plt.scatter(pet_cluster2.iloc[:, 0], pet_cluster2.iloc[:, 1], c='b', marker='v', edgecolors='black', label="Cluster-2") plt.scatter(pet_cluster3.iloc[:, 0], pet_cluster3.iloc[:, 1], c='y', marker='s', edgecolors='black', label="Cluster-3") centers = kmeans.cluster_centers_[:, :-2] # cluster center for petals centers plt.scatter(centers[:, 0], centers[:, 1], c='black', marker='X', s=200, label="Centroids") plt.xlabel('Length(cm)') plt.ylabel('Width(cm)') plt.legend() plt.title("Petal Cluster Analysis") plt.show() plot_pet_cluster() ```
github_jupyter
# Introduction If you think quantum mechanics sounds challenging, you are not alone. All of our intuitions are based on day-to-day experiences, and so are better at understanding the behavior of balls and bananas than atoms or electrons. Though quantum objects can seem random and chaotic at first, they just follow a different set of rules. Once we know what those rules are, we can use them to create new and powerful technology. Quantum computing will be the most revolutionary example of this. <img src="https://s3.us-south.cloud-object-storage.appdomain.cloud/strapi/b37a8ea62054452f83a2c051e7de12efatoms10.png" width="600"/> To get you started on your journey towards quantum computing, let's test what you already know. Which of the following is the correct description of a *bit*? * A blade used by a carpenter. * The smallest unit of information: either a ```0``` or a ```1```. * Something you put in a horse's mouth. Actually, they are all correct: it's a very multi-purpose word! But if you chose the second one, it shows that you are already thinking along the right lines. The idea that information can be stored and processed as a series of ```0```s and ```1```s is quite a big conceptual hurdle, but it's something most people today know without even thinking about it. Taking this as a starting point, we can start to imagine bits that obey the rules of quantum mechanics. These quantum bits, or *qubits*, will then allow us to process information in new and different ways. The first few sections in this chapter are intended for the broadest possible audience. You won't see any math that you didn't learn before you were age 10. We'll look at how bits work in standard computers, and then start to explore how qubits can allow us to do things in a different way. After reading this, you should already be able to start thinking about interesting things to try out with qubits. We'll start diving deeper into the world of qubits. 
For this, we'll need some way of keeping track of what they are doing when we apply gates. The most powerful way to do this is to use the mathematical language of vectors and matrices. <img src="https://s3.us-south.cloud-object-storage.appdomain.cloud/strapi/d382d3105b154856bcb3a99656954ad2bloch.png" width="500"/> This chapter will be most effective for readers who are already familiar with vectors and matrices. Those who aren't familiar will likely be fine too, though it might be useful to consult our [Introduction to Linear Algebra for Quantum Computing](../ch-prerequisites/linear_algebra.html) from time to time. Since we will be using Qiskit, our Python-based framework for quantum computing, it would also be useful to know the basics of Python. Those who need a primer can consult the [Introduction to Python and Jupyter notebooks](../ch-prerequisites/python-and-jupyter-notebooks.html).
github_jupyter
# Scientific Thinking > <p><small><small>Copyright 2021 DeepMind Technologies Limited.</p> > <p><small><small> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at </p> > <p><small><small> <a href="https://www.apache.org/licenses/LICENSE-2.0">https://www.apache.org/licenses/LICENSE-2.0</a> </p> > <p><small><small> Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. </p> **Aim** This colab intends to teach you some basic ideas about scientific thinking! The goal is to develop an intuition for some of the common pitfalls that we, as scientists and humans, often encounter when trying to understand our world. **Disclaimer** This code is intended for educational purposes and, in the name of usability for a non-technical audience, it does not always follow best practices for software engineering. **Links to resources** - [What is Colab?](https://colab.sandbox.google.com/notebooks/intro.ipynb) If you have never used Colab before, get started here! ## Setting up this Colab notebook. To install all the dependencies that you need to run this colab run the cell `Setting up the notebook` below. ``` #@title Setting up the notebook #@markdown > Installing and importing dependencies, as well as defining the code we'll use throughout the colab # Installing dependencies print("Installing dependencies...", end='') from IPython.utils import io with io.capture_output() as captured: # Add all the pip installs of modules necessary to run the colab %reset -f !apt-get update !apt-get install pip !pip install pyvirtualdisplay # You can directly pip install packages # e.g. 
!pip install dm-acme # or you can clone the repo from GithHub # e.g. !git clone https://github.com/deepmind/acme !pip install pycolab print("DONE!") print("Importing dependencies...", end='') # Importing dependencies import base64 import io import math import uuid import warnings from google.colab import html from google.colab import output import IPython import numpy as np from pycolab import ascii_art from pycolab import things as plab_things from pycolab.prefab_parts import sprites as prefab_sprites from PIL import Image, ImageEnhance, ImageDraw warnings.filterwarnings("ignore") print("DONE!") print("Defining helper functions...", end='') # Helper code # @title Helper code. # Brown: [0.219, 0.129, 0.098] # Purple (gem): [0.376, 0.101, 0.29] # Red (gem): [0.93, 0.267, 0.184] # Blue (path): [0.388, 0.674, 0.745] original_rgb = { ' ': [0.8, 0.8, 0.8], # Undefined / empty / off '-': [0.3, 1.0, 1.0], # Cyan empty - indicates path. '#': [0.219, 0.129, 0.098], # Wall '@': [0.219, 0.129, 0.098], # Fake Wall 'B': [0.376, 0.101, 0.29], # Blue gem (purple-ish for colorblind) 'L': [0.376, 0.101, 0.29], # Blue gem 'U': [0.376, 0.101, 0.29], # Blue gem 'R': [0.93, 0.267, 0.184], # Red gem 'E': [0.93, 0.267, 0.184], # Red gem 'D': [0.93, 0.267, 0.184], # Red gem 'F': [0.376, 0.101, 0.29], # Fake gem 'P': [0.9, 0.9, 0.2], # Player 1: [1.0, 0.8, 1.0], # "true" / "on" in 0D observation 33: [0.3, 0.3, 1.0], # Blue key 34: [0.3, 1.0, 0.3], # Green key 35: [1.0, 0.3, 0.3], # Red key 36: [1.0, 1.0, 0.3], # Yellow key 37: [1.0, 0.3, 1.0], # Magenta key 38: [0.3, 1.0, 1.0], # Cyan key 42: [0.7, 0.7, 0.7], # Skeleton key 49: [0.0, 0.0, 0.6], # Blue door 50: [0.0, 0.6, 0.0], # Green door 51: [0.6, 0.0, 0.0], # Red door 52: [0.6, 0.6, 0.0], # Yellow door 53: [0.6, 0.0, 0.6], # Magenta door 54: [0.0, 0.6, 0.6], # Cyan door 64: [0.9, 0.9, 0.2], # Player 43: [0.9, 0.9, 0.2], # Player 46: [0.0, 0.0, 0.0], # Background 88: [1.0, 1.0, 1.0], # Wall 62: [0.9, 0.9, 0.9], # Goal 67: [1.0, 0.0, 
0.0], # Interrupter 66: [1.0, 0.5, 0.5], # Interruption preventer } to_rgb = original_rgb.copy() KEY_MAP = {'w': 0, 's': 1, 'a': 2, 'd': 3, 'Enter': 4} def get_character_positions(level_map, character): """Returns the position of the character on the map.""" indices = [] for row_idx, line in enumerate(level_map): if character in line: col_idx = line.index(character) indices.append((row_idx, col_idx)) return indices def make_game(game_type='experiment', level='L1', per_timestep_reward=0.0): """Builds and returns a four-rooms game.""" to_rgb = original_rgb.copy() if game_type == 'experiment': game_dict = EXPERIMENT_GAMES[level] elif game_type == 'eval': game_dict = EVAL_GAMES[level] update_schedule = UPDATE_SCHEDULE[level] drapes = {} for gem_char in GEM_CHARS: position = get_character_positions(game_dict, gem_char) if position: position = position[0] gem_class = GemDrape if gem_char == 'F': gem_class = FakeGemDrape drapes[gem_char] = ascii_art.Partial( gem_class, x=position[1], y=position[0], reward_val=REWARD_DICT[level][gem_char]) sprites = {'P': ascii_art.Partial(PlayerSprite, per_timestep_reward=per_timestep_reward)} return ascii_art.ascii_art_to_game( game_dict, what_lies_beneath=' ', sprites=sprites, drapes=drapes, update_schedule=update_schedule, occlusion_in_layers=False ) def to_img(board, img_width=300, grid_color=(50, 50, 50)): """Return the map, given some obs.""" img = np.asarray([[to_rgb[chr(el)] for el in row] for row in board]) img = (img * 255).astype('uint8') h, w = img.shape[:2] new_w = img_width new_h = int(h*300/w) img = Image.fromarray(img) img = img.resize((new_w, new_h), Image.NEAREST) d = ImageDraw.Draw(img) height_offset = int(new_h/h) width_offset = int(new_w/w) for i in range(1, h-1): line_index = i * height_offset d.line([width_offset, line_index, new_w-width_offset, line_index], fill=grid_color) for j in range(1, w-1): line_index = j * width_offset d.line([line_index, height_offset, line_index, new_h-height_offset], fill=grid_color) 
return img def show_map(obs): """Display the map, given some obs.""" IPython.display.display(to_img(obs.board)) class PlayerSprite(prefab_sprites.MazeWalker): """A `Sprite` for our player. This `Sprite` ties actions to going in the four cardinal directions. If we reach a magical location (in this example, (4, 3)), the agent receives a reward of 1 and the episode terminates. """ def __init__(self, corner, position, character, per_timestep_reward=0.0): """Inform superclass that we can't walk through walls.""" self._per_timestep_reward = per_timestep_reward super(PlayerSprite, self).__init__( corner, position, character, impassable='#', confined_to_board='True') def update(self, actions, board, layers, backdrop, things, the_plot): """Update the player sprite position based on the action.""" del layers, backdrop # Unused. the_plot.add_reward(self._per_timestep_reward) # Apply motion commands. if actions == 0: # walk upward? self._north(board, the_plot) elif actions == 1: # walk downward? self._south(board, the_plot) elif actions == 2: # walk leftward? self._west(board, the_plot) elif actions == 3: # walk rightward? self._east(board, the_plot) elif actions == 4: the_plot.terminate_episode() target_character = chr(board[self.position[0], self.position[1]]) # This will return None if target_character is None target_thing = things.get(target_character) # Inform plot of overlap between player and thing. if target_thing: the_plot['over_this'] = (target_character, self.position) class BoxThing(plab_things.Drape): """Base class for locks, keys and gems.""" def __init__(self, curtain, character, x, y, reward_val=1.0): super(BoxThing, self).__init__(curtain, character) self._rewarded = False self.x = x self.y = y self.curtain[y][x] = True self.reward_val = reward_val def where_player_over_me(self, the_plot): """Check if player is over this thing. 
If so, returns the coordinates.""" over_this = the_plot.get('over_this') if over_this: character, (y, x) = over_this if character == self.character and self.curtain[y][x]: return y, x else: return False class GemDrape(BoxThing): """The gem.""" def update(self, actions, board, layers, backdrop, things, the_plot): """Sets a reward when the player sprite overlaps with the gem.""" if self.where_player_over_me(the_plot) and not self._rewarded: the_plot.add_reward(self.reward_val) self._rewarded = True class FakeGemDrape(BoxThing): """The gem but reward depends on player's position.""" def update(self, actions, board, layers, backdrop, things, the_plot): """Sets a reward whenever the player is in the second column.""" for row in board: # 80 is the player. if 80 in row: pos = np.where(row == 80)[0][0] if pos == 2 and not self._rewarded: the_plot.add_reward(self.reward_val) self._rewarded = True # Maps EXPERIMENT_GAMES = {} EVAL_GAMES = {} REWARD_DICT = {} UPDATE_SCHEDULE = {} level = 'L1' EXPERIMENT_GAMES[level] = ['#############', '#B B #', '# #', '# #', '# #', '# #', '# #', '# ### #', '# #B# #', '# # # #', '# # # #', '#B #P# #', '#############'] REWARD_DICT[level] = {'B': 3} EVAL_GAMES[level] = ['#############', '#B B #', '# #', '# #', '# #', '######## #', '#B-----# #', '######-# #', '# #-# #', '# #-# #', '# #-# #', '#B #P# #', '#############'] REWARD_DICT[level] = {'B': 3} # Make sure the player gets updated first so that the gems can detect its presence. 
UPDATE_SCHEDULE[level] = ['P', 'B'] level = 'L2' EXPERIMENT_GAMES[level] = ['#############', '#B B #', '# #', '# #', '# #', '# #', '# #', '# ### #', '# #R# #', '# # # #', '# # # #', '#B #P# #', '#############'] EVAL_GAMES[level] = ['#############', '#B B #', '# #', '# #', '# #', '######## #', '#R-----# #', '######-# #', '# #-# #', '# #-# #', '# #-# #', '#B #P# #', '#############'] REWARD_DICT[level] = {'R': 0, 'B': 0} UPDATE_SCHEDULE[level] = ['P', 'R', 'B'] level = 'L3' EXPERIMENT_GAMES[level] = ['#############', '#F@ F #', '# @ #', '# @ #', '# @ #', '# @@@@@@@@@ #', '# @ #', '# @@@@ @@@ #', '# @ @ @ #', '# @ @ @ #', '# @ @ @ #', '#F@ @ P@ #', '#############'] # EVAL_GAMES[level] = ['#############', # '#B@ B #', # '# #', # '# #', # '# #', # '# @@@@@@@@ #', # '# ----- @ #', # '# @@@@ -@@@ #', # '# @ @ -@ #', # '# @ @ -@ #', # '# @ @ -@ #', # '#B@ @P@ #', # '#############'] EVAL_GAMES[level] = ['#############', '#F@ F #', '# - #', '# - #', '# - #', '# - #', '# -- @ #', '# @@@@ -@@@ #', '# @ @ -@ #', '# @ @ -@ #', '# @ @ -@ #', '#F@ @ P@ #', '#############'] REWARD_DICT[level] = {'F': 10} UPDATE_SCHEDULE[level] = ['P', 'F'] # This should allow us to render a few things with the same color/behavior (B, L, U) are blue rewarding gems. GEM_CHARS = ['B', 'L', 'U', 'R', 'E', 'D', 'F'] # HTML stuff. # The HTML class is open source and allows us to create HTML class elements. # Here we just create an image class that allows us to play with pycolab envs easily. 
class Img(html.Element):
    """HTML <img> element that renders a pycolab observation as a JPEG.

    Keeps a "mask" of board positions the player has stepped on so that the
    items there are drawn as blanks on later renders. Depends on the `to_img`
    helper and `np`/`base64`/`io`/`ImageEnhance` imported earlier in this cell,
    and on Colab's `html.Element` base class.
    """

    def __init__(self, obs, img_width=None, brighten=True, brighten_factor=2.0, src=None):
        self._brighten = brighten
        self._brighten_factor = brighten_factor
        super(Img, self).__init__('img')
        if img_width is None:
            img_width = 300
        self._img_width = img_width
        self._mask_locations = []
        self.update_obs(obs)
        # `src` is not supported and must stay None.
        if src is not None:
            raise ValueError()

    def reset_mask(self):
        """Resets mask to an empty list."""
        self._mask_locations = []

    def _add_to_mask(self, obs, player_x, player_y):
        # Record the player's position whenever it coincides with any
        # non-player layer (i.e. the player is standing on an item), so the
        # item is hidden on subsequent renders.
        for layer in obs.layers:
            if layer != 'P':
                xs, ys = np.where(obs.layers[layer] == True)
                for x, y in zip(xs, ys):
                    if x == player_x and y == player_y:
                        self._mask_locations.append((x, y))

    def _apply_mask(self, obs):
        # Blank out every recorded position on the board (in place).
        board = obs.board
        for loc in self._mask_locations:
            x, y = loc
            # 32 is ' '
            board[x][y] = 32
        return board

    def _update_board_with_mask(self, obs):
        # Current player position: first True cell of the 'P' layer.
        player_x, player_y = map(lambda x: x[0], np.where(obs.layers['P'] == True))
        self._add_to_mask(obs, player_x, player_y)
        board = self._apply_mask(obs)
        # 80 == ord('P'): redraw the player on top of the masked board.
        board[player_x, player_y] = 80
        return board

    def update_obs(self, obs):
        """Updates the player position based on the board state."""
        board = self._update_board_with_mask(obs)
        img = to_img(board)
        content = self._to_jpeg(img)
        # Inline the JPEG bytes as a data URL so no file round-trip is needed.
        url = 'data:image/jpeg;base64,'+base64.b64encode(content).decode('utf-8')
        self.set_property('src', url)

    def _to_jpeg(self, img):
        # Optionally brighten before encoding; returns raw JPEG bytes.
        if self._brighten:
            enhancer = ImageEnhance.Brightness(img)
            img = enhancer.enhance(self._brighten_factor)
        buf = io.BytesIO()
        img.save(buf, format="JPEG",)
        return buf.getvalue()


# Another cool thing that we can do is create *any* class and update its
# `_repr_html_` method to return HTML code. When you call display(my_class),
# it runs that HTML code. Below we've set up a button class that runs a
# callback function when pressed.
class InvokeButton(object):
    """A display()-able button that invokes a Python callback when clicked.

    Registers the callback with the Colab kernel and emits a <button> plus a
    small JS snippet from `_repr_html_`.
    """

    def __init__(self, title, callback):
        self._title = title
        self._callback = callback

    def _repr_html_(self):
        # Fresh id per render so multiple buttons don't collide.
        callback_id = 'button-' + str(uuid.uuid4())
        output.register_callback(callback_id, self._callback)
        template = """<button id="{callback_id}">{title}</button>
        <script>
          document.querySelector("#{callback_id}").onclick = (e) => {{
            google.colab.kernel.invokeFunction('{callback_id}', [], {{}})
            e.preventDefault();
          }};
        </script>"""
        html = template.format(title=self._title, callback_id=callback_id)
        return html


class Div(html.Element):
    """Exposes an element's textContent as a Python property.

    NOTE(review): despite the name, this wraps an <input> element, not a
    <div> -- confirm whether that is intentional.
    """

    def __init__(self):
        super(Div, self).__init__('input')

    @property
    def text_content(self):
        return self.get_property('textContent')

    @text_content.setter
    def text_content(self, value):
        return self.set_property('textContent', value)


class GameKeeper():
    """Maintains game state and updates the board when player moves."""

    def __init__(self, game_type, level, per_timestep_reward=0.0):
        self._game_type = game_type
        self._level = level
        self._per_timestep_reward = per_timestep_reward
        self._start_game()

    def _start_game(self):
        # Build a fresh game, take the first step, and reset the episode
        # return accumulator.
        self.game = make_game(self._game_type, self._level, self._per_timestep_reward)
        self.obs, r, _ = self.game.its_showtime()
        self._return = 0.

    def move(self, key):
        """Moves the player based on the key pressed."""
        action = KEY_MAP[key]
        # Evaluation levels are look-but-don't-touch: refuse to act on them.
        if self._game_type == 'eval':
            print("Heeey now :) Let us be sciencey and keep the experiments to the lab!")
            return
        self.obs, reward, _ = self.game.play(action)
        if self.game.game_over:
            print("Game over! The reward was: ", self._return)
            print("Restarting game...")
            self._start_game()
            # `img` is the module-level Img instance -- presumably created by
            # the notebook UI cells; not visible in this cell.
            img.reset_mask()
        else:
            self._return += reward
        img.update_obs(self.obs)

print("DONE!")
print("\nSetup DONE!")
```
# What is scientific thinking?

Science is an organised way to form knowledge about... well, everything around us.
<center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/what_is_thinking.png" alt="drawing" height="350"/> </center> Humankind has been building on the knowledge of our ancestors for thousands of years, often not thinking too much about the origins of that knowledge: why is it that people think like that, or what might happen if we think differently? We use this knowledge to do all sorts of wonderful things! Knowledge helps us better understand the world and humanity, for example, understand how spiders weave their webs, or how groups of people behave in emergency situations. Knowledge also helps us to develop technology to continually improve our standard of living, for example, invent electricity, central heating, or phones! For the most part though, this process of building knowledge was not done in a structured and systematic way. Only fairly recently, relative to the timespan of humanity, have we started thinking about the core elements of how to form knowledge, how to test that knowledge, and good methods that we can use to do this. This was formalised as the scientific method. ## The scientific method The scientific method is based on a hands-on approach to knowledge development. It consists of three steps: **Step 1:** Make an **observation** and then come up with a **testable** and **falsifiable** hypothesis that helps explain it. <center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/method_step_1.png" alt="drawing" height="350"/> </center> Thats a lot of fancy terms! Let's try to understand what they mean a little better: * **Observation**: To observe is to actively inspect something, in order to collect information about it. For example *collecting information about the behaviour of falling stones* is conducting an observation, while just *looking at the stones and admiring how powerful they are when they fall* is not. 
* **Testable**: A hypothesis is testable if we can, in some realistic conditions (practicality, plausibility), decide whether it is true or false. For example, *two different-sized rocks released from the same height will fall to the ground at the same time* is a testable hypothesis, whereas the same hypothesis for rocks on the sun is not testable (yet---it burns!). * **Falsifiable**: A hypothesis is falsifiable if it *can be disproved by evidence*. For example our hypothesis about different-sized rocks falling to the ground at the same time is also *falsifiable*. We can just measure the time for them to fall to see if it is true! Phew! OK! Now that we have our hypothesis, what do we do with it? That brings us to... **Step 2**: Design an **experiment** to test this hypothesis and draw conclusions by **analysing** the results. <center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/method_step_2.png" alt="drawing" height="350"/> </center> Let's try to define these terms: - **Experiment**: a scientific procedure we do to discover something new, and test a hypothesis. For example, babies experiment by dropping objects to discover what happens to an object when it is dropped (and to test how their parents will react to those objects being dropped, over and over and over again ;) ). - **Analysis**: a detailed examination of something in order to understand it. For example, taking the results of an experiment tracking how objects fall, and analysing them by fitting a formula to these results. And finally, after we have analysed our results we move on to... **Step 3**: Validate your hypothesis based on the results. <center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/method_step_3.png" alt="drawing" height="350"/> </center> The results can support or refute the hypothesis, but can also be inconclusive. If the results support the hypothesis, yay! 
Our hypothesis, now a working hypothesis, becomes an experimentally supported piece of knowledge that we can rely on and build on in the future. However, and we cannot stress this enough, refuted and inconclusive hypotheses are *equally important* because they too are novel pieces of knowledge which, other than adding to the body of knowledge, also influence the next hypotheses. In addition, validation does not end the first time we collect data and formulate the first hypothesis. Much of science is *actively* trying to find evidence that would refute the hypothesis. All of this might sound pretty simple but can actually be quite hard in practice. To understand why... let's play a simple game! ## Confirmation bias Let's think about a sequence of 3 numbers. Let's imagine there is a rule in my head to determine the sequence of 3 numbers. Can you guess what the rule is if I just give you one example? <center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/confirmation_bias.png" alt="drawing" height="400"/> </center> ``` #@title Try to come up with triplets that follow the rule! Number1 = 2 #@param {type:"integer"} Number2 = 2 #@param {type:"integer"} Number3 = 2 #@param {type:"integer"} # Write code to get the number and return yes or no def check(x, y, z): return z == x + y if check(Number1, Number2, Number3): print('Well done! They follow the rule!') else: print(f'Oh no, this is not quite right! ({Number1},{Number2},{Number3}) does not follow the rule, try again!') ``` ### Why is science hard? Because we can make many mistakes along the way :) One of the simplest ways to make a mistake is to *fall in love with your hypothesis*. It may sound strange, but after forming a hypothesis and working on it for a long time, we tend to cheer for it; we want to see it proven. ...but science doesn't care about that, it only cares for the truth, and so should we. 
"The great tragedy of Science – the slaying of a beautiful hypothesis by an ugly fact." — Thomas Huxley ## Many hypotheses and Occam's Razor As we saw in the pattern game, in many cases, multiple hypotheses fit well with the *observations* we collected with our *experiment*. Which one should we choose then? According to the **Occam's Razor** principle, when two theories explain the data equally well, the **simplest hypothesis** (in other words, the one that requires fewer assumptions) is most likely to be true. Let's see why with an example. <center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/occams_razor.png" alt="drawing" height="350"/> </center> ### Occam's Razor's example Let's suppose you found a coin on the sidewalk and would like to know how it got there. One hypothesis could be that it fell out of someone's pocket. But it could also be the case that a pigeon stole it from someone's hand and dropped it on the sidewalk. For each hypothesis we are making different **assumptions**: * **Hypothesis 1:** * Someone had a coin in their pocket. * The coin fell out of it. * **Hypothesis 2:** * Someone had a coin in their hand. * A pigeon was flying by. * The pigeon stole the coin from the person's hand. * The pigeon then dropped the coin on the sidewalk. Both hypotheses explain the fact that we found the coin on the sidewalk (**observation**) equally well, but Occam's Razor tells us that the first hypothesis is more likely to be what happened, because it relies on fewer assumptions. # The story of a smart horse... Many times, the actual explanation is so simple, we may even miss it! This is what happened in the famous case of 'Clever Hans' - a horse that convinced everyone that it had learned to do maths! Our story is set in Germany... At the beginning of the 20th century people were fascinated by Charles Darwin's discoveries and began studying how intelligent animals were. 
**Herr Wilhelm von Osten** was a maths teacher and an amateur horse trainer. He **taught his horse Hans basic maths** such as addition, subtraction, product, division and fractions, but also how to read, spell and understand spoken German and how to keep track of dates and time. He would ask Hans questions such as: "How much is 23 minus 11?", and Hans would tap its hoof 12 times in response. He began traveling around Germany to show how smart *Clever Hans* was. <center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/smart_horse.png" alt="drawing" height="400"/> </center> The show was such a success that soon scientists became interested in trying to determine how clever *Clever Hans* really was. Among them, a psychologist called **Carl Stumpf**, was put in charge by the German board of education to form a commission to **study the horse**. In September 1904, the commission concluded that Hans' astonishing responses did not appear to be the result of a trick. **Oskar Pfungst** was then appointed to conduct a detailed evaluation of *Clever Hans*' abilities. He **devised several experiments** to verify Herr Wilhelm's claims. At first, he isolated the horse and the questioner from the attendants to make sure the horse was not given any clue from them. In this setting, *Clever Hans* was right on almost all the questions. He then had someone else ask the questions to the horse, which proved that Herr Wilhelm did not devise a secret code to tell the horse the right answer. Finally, he put blinders on the horse to prevent it from seeing the questioner after the question was asked. When the questioner was not in *Clever Hans*' sight, **the accuracy of the horse's answers went from 89% to only 6%**! Finally, Pfungst conducted another experiment where the questioner was in sight, but didn't know the answer to the question. Similarly, the horse's performance dropped. 
This demonstrated that the horse learned to slow down its taps and read cues from the people in front of it, to perceive when they were expecting it to stop tapping.

Von Osten, as well as many other people, **genuinely thought the horse learned how to do arithmetic** and answer simple questions, when in reality Pfungst showed that he could get the horse to stop at any time just by raising his eyebrows slightly! So many people were convinced that Hans could do maths, they **never stopped to question if there was another explanation for his behavior**. In many ways Hans was quite clever to have fooled so many people!

But once again, **Occam's Razor was correct**: the simplest explanation was the correct one!

## Occam's Razor probabilistic justification:

This sounds somewhat intuitive, but can also be explained from a probabilistic point of view: in other words, using the tools of probability theory.

The **probability** $P(A)$ of an event $A$ is, by definition, a real number between 0 (0%, *always false*) and 1 (100%, *always true*). The **joint probability** of some *independent* events, which measures the probability of all events occurring together, is the product of the probabilities of each event:

$$ P(A, B, C, \dots) = P(A) \cdot P(B) \cdot P(C) \cdot \dots $$

> (It is important to note here that this formula applies only to independent (uncorrelated) events. The general formula is a bit more involved.)

Great! But how can this help us understand Occam's razor? Let's ask Clever Hans!

<center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/smart_prob_horse.png" alt="drawing" height="500"/> </center>

The **probability of a hypothesis** is equal to the **joint probability of the assumptions** that sustain that hypothesis. By the definition of probability given earlier, an event has probability 1 if and only if it's certain.
In all other cases its probability will be non-negative (greater than or equal to zero), and **smaller than 1**.

The product of two numbers smaller than $1$ is smaller than each of the numbers. **The product of the probabilities** of multiple uncertain assumptions will hence grow **smaller and smaller the more assumptions** we have.

Wait, what? I'm not sure Clever Hans understood, can you be more clear?

Of course, let's see an example with some numbers!

### Example:

Let's go back to our two hypotheses about the coin! The first hypothesis was based on two assumptions, while the second on four. Let's suppose the likelihood of the assumptions were as follows:

* Hypothesis 1:
  * P(A = Someone had a coin in their pocket) = 0.8
  * P(B = The coin fell out of it) = 0.9
* Hypothesis 2:
  * P(C = Someone had a coin in their hand) = 0.9
  * P(D = A pigeon was flying by) = 0.8
  * P(E = The pigeon stole the coin from the person's hand) = 0.6
  * P(F = The pigeon then dropped the coin on the sidewalk) = 0.9

Remember: an event with probability 0.8 means that the event has an 80% likelihood of happening.

Now, let's compute the joint probability of the conditions of each hypothesis:

* Hypothesis 1:
$$
P(A, B) = 0.8 \cdot 0.9 = 0.72\\
$$
* Hypothesis 2:
$$
P(C, D) = 0.9 \cdot 0.8 = 0.72\\
P(C, D, E) = 0.72 \cdot 0.6 = 0.432\\
P(C, D, E, F) = 0.432 \cdot 0.9 = 0.3888
$$

It is easy to see that the more (uncertain) assumptions a hypothesis has, the less likely it is to be correct. Just as predicted by the **Occam's Razor principle**!

# Science and Reinforcement Learning

We can also use Occam's razor in many other fields, such as in reinforcement learning, a topic that is of interest to many researchers at DeepMind.

In reinforcement learning, the agent (for instance, the robot) is incentivised to do a particular task by being offered rewards for its behaviour. The agent then simply tries to get as much reward as it can.
For example, giving a treat to your dog when it gives you its paw, when teaching it to "shake your hand", leads to your dog learning the skill, because the dog wants to get more treats.

To maximise the reward, the agent needs to discover what is rewarding! So in many ways it is just like a scientist - trying things, observing what happens, learning from mistakes and repeating!

<center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/rl.png" width="600"/> </center>

But this also means the agents we train will face the same hurdles that we do. As they explore the world to discover where the rewards are, they must learn to use observations to guide their behavior! To understand this better, let us play a game where we are the agent trying to discover the reward in our world...

# Let's play some games!

<center> <img src="https://storage.googleapis.com/dm-educational/assets/scientific-thinking/where_rewards.png" alt="drawing" height="400"/> </center>

**HOW TO PLAY:**

In the Cells below, whenever the game is in 'Experiment Mode', you can control the agent with your keyboard. To play the game:

First -
* Make sure to run the cell where you want to play.
* Click on the game image to select it and type in **'w'** to **move up**; **'s'** to **move down**; **'a'** to **move to the left** and **'d'** to **move to the right**.
* When you hit **'Enter'** the **episode** (one round of the game) **ends**, the reward for that episode is printed onto the display and a new episode is started.

The game may lag a little so be patient :)

It's time to go exploring!

## Game 1: Can you guess the reward?!

Let's start with a simple example. Play with the game below and try to figure out what the reward is...

```
#@title Game 1: Experiment Mode.
# Build the level-1 world in interactive mode; GameKeeper/Img/output are
# defined earlier in the notebook (not visible here).
level = 'L1'
game_type = 'experiment'
game_keeper = GameKeeper(game_type, level)
img = Img(game_keeper.obs, brighten_factor=2.5)
# Expose the Python move handler so the browser-side JS can invoke it.
output.register_callback('notebook.UpdateImg', game_keeper.move)
# Forward every keydown event in the output frame to the kernel callback.
display(IPython.display.Javascript('''
document.addEventListener("keydown", async function(e){
const result = await google.colab.kernel.invokeFunction(
'notebook.UpdateImg', // The callback name.
[e.key], // The arguments.
{}); // kwargs
});
'''))
img
```

Once you have an idea of what the reward is, in the map shown below, enter what you think the reward should be for the path taken in blue (when the path leads to an object, assume the agent went onto the object). Try to experiment a few times before looking at the solution in the cell!

```
#@title Game 1: Evaluation Map
# Same level, but in evaluation mode: shows the fixed blue path to grade.
level = 'L1'
game_type = 'eval'
game_keeper = GameKeeper(game_type, level)
img_eval = Img(game_keeper.obs, brighten_factor=2.5)
img_eval

# @title Fill out answers
reward = 0 #@param {type:"number"}
# Expected answer for the evaluation path above.
if reward == 3.0:
    print("That's right! The agent will get a reward of 3.0!")
else:
    print("Oh no! That wasn't right. Try again :)")
```

What is the *maximum* reward any agent could get on the second map?

```
maximum_possible_reward = 0 #@param {type:"number"}
if maximum_possible_reward == 3.0:
    print("That's right! The agent can get a maximum reward of 3.0!")
else:
    print("Oh no! That wasn't the maximum reward :( What is the best thing the agent can do here?")
```

## Game 2: Can you figure out the reward rule?

Alright! Now that we're warmed up, lets look at another game. Remember.. this is a completely new game in a new world with its own rules!

```
#@title Game 2: Experiment Mode.
# Level 2 charges the agent -1 reward per step (per_timestep_reward),
# so shorter paths score higher.
level = 'L2'
game_type = 'experiment'
game_keeper = GameKeeper(game_type, level, per_timestep_reward=-1.0)
img = Img(game_keeper.obs, brighten_factor=2.5)
output.register_callback('notebook.UpdateImg', game_keeper.move)
display(IPython.display.Javascript('''
document.addEventListener("keydown", async function(e){
const result = await google.colab.kernel.invokeFunction(
'notebook.UpdateImg', // The callback name.
[e.key], // The arguments.
{}); // kwargs
});
'''))
img
```

Can you come up with multiple hypotheses to explain your observations? Are they falsifiable? Try to eliminate your hypothesis until only one is left!

In the map shown below, enter what you think the reward should be for the path taken in blue (when the path leads to an object, assume the agent went onto the object). Try to experiment a few times before looking at the solution in the cell!

```
#@title Game 2: Evaluation Map
level = 'L2'
game_type = 'eval'
game_keeper = GameKeeper(game_type, level, per_timestep_reward=-1.0)
img_eval = Img(game_keeper.obs, brighten_factor=2.5)
img_eval

# @title Fill out answers
reward = 0 #@param {type:"number"}
if reward == -10.0:
    print("That's right! The agent will get a reward of -10.!")
else:
    print("Oh no! That wasn't right. Try again :)")

maximum_possible_reward = 0 #@param {type:"number"}
if maximum_possible_reward == 0.0:
    print("That's right! The agent can get a maximum reward of 0.0!")
else:
    print("Oh no! That wasn't the maximum reward :( What is the best thing the agent can do here?")
```

## Game 3: Can you figure out the reward rule?

OK! Lets do one more map. Remember - each new map is a whole new world with its own rules. All we have is our science toolbox to help us along the way...

```
#@title Game 3: Experiment Mode.
# Level 3: no per-step cost here (default GameKeeper settings).
level = 'L3'
game_type = 'experiment'
game_keeper = GameKeeper(game_type, level)
img = Img(game_keeper.obs, brighten_factor=2.5)
output.register_callback('notebook.UpdateImg', game_keeper.move)
display(IPython.display.Javascript('''
document.addEventListener("keydown", async function(e){
const result = await google.colab.kernel.invokeFunction(
'notebook.UpdateImg', // The callback name.
[e.key], // The arguments.
{}); // kwargs
});
'''))
img
```

Sometimes it helps to write down our hypothesis and what assumptions they each make. When we write things down explicitly, Occam's razor can often help us figure out what is most likely to be true!

In the map shown below, enter what you think the reward should be for the path taken in blue (when the path leads to an object, assume the agent went onto the object). Try to experiment a few times before looking at the solution in the cell!

```
#@title Game 3: Evaluation Map
level = 'L3'
game_type = 'eval'
game_keeper = GameKeeper(game_type, level)
img_eval = Img(game_keeper.obs, brighten_factor=2.5)
img_eval

# @title What reward would the agent get?
reward = 0 #@param {type:"number"}
if reward == 0.0:
    print("That's right! The agent will get a reward of 0.0!")
else:
    print("Oh no! That wasn't right. Try again :)")

maximum_possible_reward = 0 #@param {type:"number"}
if maximum_possible_reward == 10.0:
    print("That's right! The agent can get a maximum reward of 10.0!")
else:
    print("Oh no! That wasn't the maximum reward :( What is the best thing the agent can do here?")
```
github_jupyter
``` from zipline import run_algorithm from zipline.api import order_target_percent, symbol, order, record from datetime import datetime import pytz import matplotlib.pyplot as plt from trading_calendars.exchange_calendar_binance import BinanceExchangeCalendar import pandas as pd from trading_calendars import get_calendar import pyfolio as pf import numpy as np ``` ### A few things to note: #### 1) zipline enters the ordered stock and amount in the order book (order() function). After the handle_data() function has finished, zipline looks for any open orders and tries to fill them. If the trading volume is high enough for this stock, the order is executed after adding the *commission* and applying the *slippage model* which models the influence of your order on the stock price, so your algorithm will be charged more than just the stock price. #### 2) Order execution - When your algorithm places an order on a given bar, the order begins filling until the next bar regardless of the slippage model used. This way, the backtester guards the algorithm against lookahead bias. 
```
def initialize(context):
    # Runs once at backtest start: pick the traded asset and order size.
    context.udy = symbol("ETHBTC")
    # context.has_ordered = False
    context.n_udy_to_buy = 10

def handle_data(context, data):
    # Called every minute bar. Moving-average crossover:
    # slow MA over 50 one-minute bars, fast MA over 20.
    slowma = data.history(context.udy, fields='price', bar_count=50, frequency='1m').mean()
    fastma = data.history(context.udy, fields='price', bar_count=20, frequency='1m').mean()
    # trading logic
    if fastma > slowma:
        # placing buy order
        # NOTE(review): this re-orders 10 units on EVERY bar while the
        # condition holds (position keeps growing) — confirm intended.
        order(context.udy, context.n_udy_to_buy)
        buy = True  # NOTE(review): assigned but never read — dead local
        # order_target_percent(context.udy, 10)
    if fastma < slowma:
        # placing sell order
        order(context.udy, -context.n_udy_to_buy)
        sell = True  # NOTE(review): assigned but never read — dead local
        # order_target_percent(context.udy, -10)
    # Record per-bar values so they show up in the result DataFrame.
    record(ETHBTC=data.current(context.udy, fields='price'), fastma = fastma, slowma = slowma)

# standard analysis provided by pyFolio
def analyze_py(context, perf):
    # Use PyFolio to generate a performance report
    returns, positions, transactions = pf.utils.extract_rets_pos_txn_from_zipline(perf)
    print(returns.cumsum())
    fig = pf.create_returns_tear_sheet(returns, benchmark_rets=None)
    # Re-enable bottom x tick labels on every subplot of the tear sheet.
    for ax in fig.axes:
        ax.tick_params(
            axis='x',            # changes apply to the x-axis
            which='both',        # both major and minor ticks are affected
            bottom=True,
            top=False,
            labelbottom=True)    # labels along the bottom edge are on

# customized analysis
def analyze(context, perf):
    # Five stacked panels: equity curve, exposure, returns, drawdown,
    # and a 6-month rolling Sharpe ratio.
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(511)
    ax.set_title('Strategy Results')
    # Log scale so compounding growth reads as a straight line.
    ax.semilogy(perf['portfolio_value'], linestyle='-',
                label='Equity Curve', linewidth=3.0)
    ax.legend()
    ax.grid(False)
    ax = fig.add_subplot(512)
    ax.plot(perf['gross_leverage'], label = 'Exposure', linestyle='-', linewidth=1.0)
    ax.legend()
    ax.grid(True)
    ax = fig.add_subplot(513)
    ax.plot(perf['returns'], label='Returns', linestyle='-.', linewidth=1.0)
    ax.legend()
    ax.grid(True)
    ax = fig.add_subplot(514)
    ax.plot(perf['max_drawdown'], label='max drawdown', linestyle='-.', linewidth=1.0)
    ax.legend()
    ax.grid(True)
    # calculate 6 months rolling sharp ratio
    # risk free rate 2%
    # perf['6m_rolling_SR'] = perf['returns'].rolling(180).apply(lambda x: (x.mean() - 0.02))
    # perf.fillna(0, inplace = True)
    # print(perf['6m_rolling_SR'])
    def my_rolling_sharpe(y):
        # Annualization factor sqrt(126) — presumably 126 trading half-days
        # per 6 months; no risk-free rate subtracted here. TODO confirm.
        return np.sqrt(126) * (y.mean() / y.std())
    ax = fig.add_subplot(515)
    perf['6m_rolling_SR'] = perf['returns'].rolling(180).apply(my_rolling_sharpe)  # to revisit
    # print(perf['6m_rolling_SR'])
    ax.plot(perf['6m_rolling_SR'], label='Sharpe', linestyle='-', lw=2, color='orange')
    # perf[perf['6m_rolling_SR'] > 0]["6m_rolling_SR"].plot(style='-', lw=2, color='orange',
    #                                                       label='Sharpe', figsize = (10,7))
    ax.legend()
    ax.grid(True)

# calculate sharp ratio
# risk_free_rate = 0.02  # 10 year Treasury bond
# daily_rf_return = (1 + risk_free_rate)** 1/252 - 1
# daily_rf_return

%%time
# Run a minute-frequency backtest on the Binance calendar / custom bundle.
start_date = pd.Timestamp(datetime(2017, 8, 14, tzinfo=pytz.UTC))
end_date = pd.Timestamp(datetime(2018, 2, 15, tzinfo=pytz.UTC))
results = run_algorithm(
    start=start_date,
    end=end_date,
    initialize=initialize,
    trading_calendar = get_calendar("Binance"),
    analyze=analyze,  # customized analysis
    handle_data=handle_data,
    capital_base=10000,
    data_frequency='minute',
    bundle='binance_1m',
    # benchmark_returns=None
)

# %%time
# start_date = pd.Timestamp(datetime(2017, 7, 14, tzinfo=pytz.UTC))
# end_date = pd.Timestamp(datetime(2019, 12, 31, tzinfo=pytz.UTC))
# results = run_algorithm(
#     start=start_date,
#     end=end_date,
#     initialize=initialize,
#     trading_calendar = get_calendar("Binance"),
#     analyze=analyze_py,  # pyfolio standard analysis
#     handle_data=handle_data,
#     capital_base=10000,
#     data_frequency='minute',
#     bundle='binance_1m',
#     # benchmark_returns=None
# )

pd.set_option('display.max_columns', None)
results.head(10)
```

#### A moving average strategy , when short-term MA cross above long-term MA, buy 10 ETHBTC and when short-term MA cross below long-term MA, sell 10 ETHBTC.

```
# day 1
results["transactions"][0]

# last day
results["transactions"][-1]
```
github_jupyter
# Simulation to extend Hanna & Olken (2018) ## Universal Basic Incomes versus Targeted Transfers: Anti-Poverty Programs in Developing Countries Consider different budget levels, and a mix of UBI and targeted transfers. Simulation notebook. ## Setup ``` def import_or_install(package, pip_install=None): """ Try to install a package, and pip install if it's unavailable. Args: package: Package name. pip_install: Location to pip install from. Runs `pip install [package]` if not provided. """ import pip if pip_install is None: pip_install = package try: __import__(package) except ImportError: pip.main(['install', package]) import_or_install('pandarallel') import pandas as pd import numpy as np import os import microdf as mdf from pandarallel import pandarallel pandarallel.initialize() ``` ## Load data [This notebook](https://colab.research.google.com/drive/1dxg8kjXHV7Fc-qKlaA0LjNPFrzLD0JVM) downloads this file directly from the Census Bureau. ``` SPM_COLS = ['SPM_ID', 'SPM_NUMPER', 'SPM_RESOURCES', 'SPM_POVTHRESHOLD', 'SPM_WEIGHT'] raw = pd.read_csv( 'https://github.com/MaxGhenis/datarepo/raw/master/pppub19.csv.gz', usecols=SPM_COLS + ['MARSUPWT']) ``` Source: [World Bank](https://data.worldbank.org/indicator/NY.GDP.MKTP.CD?locations=US) (as of 2018) ``` US_GDP = 20.5e12 ``` ## Preprocess ``` u = raw.groupby(SPM_COLS).sum() u.reset_index([i for i in SPM_COLS if i != 'SPM_ID'], inplace=True) ``` Define `y` to be resources per person. Set values below \$1 to \$1 so that CRRA works. ``` u['y0'] = np.maximum(1., u.SPM_RESOURCES / u.SPM_NUMPER) u['w'] = u.SPM_WEIGHT / 100 ``` Assign weighted rank by income. ``` u.sort_values('y0', inplace=True) u['y0_rank'] = u.w.cumsum() u['y0_pr'] = u.y0_rank / u.w.sum() ``` ### Add noisy income The actual value of the noisy income isn't important, since it's only used for ranking households. Therefore, random normal noise is sufficient. 
Set noise level to match Hanna and Olken's model:

> The typical fit we found of these regressions (the R2) is between 0.53 and 0.66

Their [appendix](https://www.aeaweb.org/content/file?id=8344) shows that they were predicting log income. Shoot for average: 0.595.

```
np.random.seed(0)
TARGET_R2 = 0.595

def log_noise(y, noise_mean):
    """Multiply `y` by lognormal noise: adds N(0, noise_mean^2) in log space."""
    return np.exp(np.log(y) + noise_mean * np.random.randn(len(y)))

def r2(noise_mean):
    """R-squared between log true income and log noised income (global `u`)."""
    y_noise = log_noise(u.y0, noise_mean)
    r = np.corrcoef(np.log(u.y0), np.log(y_noise))[0, 1]
    return np.power(r, 2)

# Noise level chosen by hand so that r2(NOISE_LEVEL) ~= TARGET_R2.
NOISE_LEVEL = 1.37
r2(NOISE_LEVEL)  # Close to 0.595.

# Double the noise for the "high noise" scenario.
r2(NOISE_LEVEL * 2)

u['y0_l_noise'] = log_noise(u.y0, NOISE_LEVEL)
u['y0_h_noise'] = log_noise(u.y0, NOISE_LEVEL * 2)
```

Re-rank.

```
# Weighted percent ranks under low noise...
u.sort_values('y0_l_noise', inplace=True)
u['y0_rank_l_noise'] = u.w.cumsum()
u['y0_pr_l_noise'] = u.y0_rank_l_noise / u.w.sum()

# ...and under high noise.
u.sort_values('y0_h_noise', inplace=True)
u['y0_rank_h_noise'] = u.w.cumsum()
u['y0_pr_h_noise'] = u.y0_rank_h_noise / u.w.sum()
```

Check R-squared from noisy to true income rank.

**Low noise**

```
u[['y0_rank', 'y0_rank_l_noise']].corr().iloc[0, 1]
```

**High noise**

```
u[['y0_rank', 'y0_rank_h_noise']].corr().iloc[0, 1]
```

## Analysis

### Define CRRA function

```
def crra(y, w=None, rho=3):
    """
    Constant relative risk-aversion social welfare function.

    Args:
        y: Array of after-tax after-transfer income.
        w: Optional array of weights. Should be the same length as y.
        rho: Coefficient of relative risk-aversion, where higher values of
            rho put higher weights on transfers received by the very poor.
            Defaults to 3 per Hanna and Olken (2018).

    Returns:
        CRRA SWF. Also sets any value below 1 to 1.
    """
    # With rho=3 this is sum(y^-2) / -2: a negative number that increases
    # (toward 0) as incomes rise, weighting the poorest most heavily.
    num = np.power(np.array(y, dtype=float), 1 - rho)
    if w is not None:
        num *= w
    return num.sum() / (1 - rho)
```

Status quo CRRA value.
``` crra0 = crra(u.y0, u.w) crra0 ``` ### Define horizontal equity function From Hanna and Olken (2018): >At each cutoff c, we calculate, for each household, the percentage of households within ±5 income percentiles (based on actual income) that received the same benefit status—included or excluded—based on the results of proxy-means test prediction. In other words, for households that were included in the program at a given c, we calculate the percentage of similar households that were also included; for households that were excluded, we calculate the percentage of similar households that were also excluded. **TODO** ### Define simulation function ``` total_hhs = u.w.sum() # Number of SPM units. def simulate(budget_share_of_gdp, pr_threshold, ubi_share, income_pr_col): """ Simulate a transfer split between targeted and UBI components. Args: budget_share_of_gdp: Total budget to be split between targeted and UBI components, as a share of US GDP (0 to 100). pr_threshold: Percentrank below which households get the targeted transfer. 0 to 100. ubi_share: Number between 0 and 100 representing the share of the transfer that goes to a UBI. income_col: Column indicating the income percent rank (true or noisy). Returns: Tuple of (targeted_amount, ubi_amount, crra). 
""" budget = US_GDP * budget_share_of_gdp / 100 ubi_budget = budget * (ubi_share / 100) targeted_budget = budget * (1 - ubi_share / 100) ubi_amount = ubi_budget / total_hhs target_idx = u[income_pr_col] < (pr_threshold / 100) target_hhs = u[target_idx].w.sum() targeted_amount = targeted_budget / target_hhs y1 = u.y0 + ubi_amount + np.where(target_idx, targeted_amount, 0) return targeted_amount, ubi_amount, crra(y1) ``` ## Simulate Cartesian product function from https://github.com/MaxGhenis/microdf/blob/master/microdf/utils.py ``` SIMX = { 'budget_share_of_gdp': [0.01, 0.1, 0.2, 0.5, 1, 5], 'noise_col': ['y0_pr', 'y0_pr_l_noise', 'y0_pr_h_noise'], 'pr_threshold': np.arange(0, 101, 1), 'ubi_share': np.arange(0, 101, 1) } sim = mdf.cartesian_product(SIMX) ``` Usually takes ~25 minutes. ``` %%time sim[['targeted_amount', 'ubi_amount', 'crra']] = sim.parallel_apply( lambda row: simulate(row.budget_share_of_gdp, row.pr_threshold, row.ubi_share, row.noise_col), axis=1, result_type='expand') ``` ## Postprocess Make the noise column a category. ``` sim['noise'] = pd.Categorical( np.where(sim.noise_col == 'y0_pr', 'No noise', np.where(sim.noise_col == 'y0_pr_l_noise', 'Low noise', 'High noise')), categories = ['No noise', 'Low noise', 'High noise']) sim.drop(['noise_col'], axis=1, inplace=True) ``` ## Export ``` sim.to_csv('sim.csv', index=False) ```
github_jupyter
# Harmonizome ETL: BioGPS (Human Cell Line) Created by: Charles Dai <br> Credit to: Moshe Silverstein Data Source: http://biogps.org/downloads/ ``` # appyter init from appyter import magic magic.init(lambda _=globals: _()) import sys import os from datetime import date import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import harmonizome.utility_functions as uf import harmonizome.lookup as lookup %load_ext autoreload %autoreload 2 ``` ### Notebook Information ``` print('This notebook was run on:', date.today(), '\nPython version:', sys.version) ``` # Initialization ``` %%appyter hide_code {% do SectionField( name='data', title='Upload Data', img='load_icon.png' ) %} {% do SectionField( name='settings', title='Settings', img='setting_icon.png' ) %} %%appyter code_eval {% do DescriptionField( name='description', text='The following dataset examples were sourced from <a href="http://biogps.org/downloads/" target="_blank">biogps.org</a>. The example for probe annotations was sourced from <a href="http://www.affymetrix.com/support/technical/byproduct.affx?product=hgu133" target="blank">www.affymetrix.com</a>. If clicking on the examples does not work, they should be downloaded directly from the source website. 
', section='data' ) %} {% set matrix_file = FileField( constraint='.*\.zip$', name='matrix', label='Dataset File (zip)', default='Input/BioGPS/gnf1h-gcrma.zip', examples={ 'gnf1h-gcrma.zip (Human U133A/GNF1H Gene Atlas)': 'http://plugins.biogps.org/download/gnf1h-gcrma.zip', 'NCI60_U133A_20070815.raw.csv.zip (Human NCI60 Cell Lines)': 'http://plugins.biogps.org/download/NCI60_U133A_20070815.raw.csv.zip' }, section='data' ) %} {% set gene_file = FileField( constraint='.*\.csv$', name='gene_meta', label='Probe Annotations (csv)', default='Input/BioGPS/HG-U133A.na36.annot.csv', examples={ 'HG-U133A.na36.annot.csv': 'http://www.affymetrix.com/Auth/analysis/downloads/na36/ivt/HG-U133A.na36.annot.csv.zip', }, section='data' ) %} %%appyter code_eval {% set dataset = ChoiceField( name='dataset', label='Dataset', choices={ 'Human U133A/GNF1H Gene Atlas': 'U133A', 'Human NCI60 Cell Lines': 'NCI60' }, default='Human U133A/GNF1H Gene Atlas', section='settings' ) %} ``` ### Load Mapping Dictionaries ``` symbol_lookup, geneid_lookup = lookup.get_lookups() ``` ### Output Path ``` %%appyter code_exec output_name = 'bioGPS-{{dataset}}' path = 'Output/BioGPS-{{dataset}}' if not os.path.exists(path): os.makedirs(path) ``` # Load Data ``` %%appyter code_exec matrix = pd.read_csv({{matrix_file}}, sep=',', index_col=0) matrix.head() matrix.shape ``` ## Load Probe Annotations ``` %%appyter code_exec gene_meta = pd.read_csv({{gene_file}}, sep=',', skiprows=25, usecols=['Probe Set ID', 'Gene Symbol'], index_col=0) gene_meta.head() gene_meta.shape ``` # Pre-process Data ## Map Gene to Probe ``` %%appyter code_exec columns_name = { 'U133A': 'Tissue', 'NCI60': 'Cell Line' }['{{dataset}}'] matrix.index = gene_meta.reindex(matrix.index)['Gene Symbol'] matrix.index.name = 'Gene Symbol' matrix.columns.name = columns_name matrix.head() ``` ## Revert Duplicate Column Names ``` matrix.columns = matrix.columns.map(lambda x: x.split('.')[0]) matrix.head() ``` ## Save Unfiltered Matrix to file ``` 
uf.save_data(matrix, path, output_name + '_matrix_unfiltered', compression='gzip', dtype=np.float32) ``` # Filter Data ## Map Gene Symbols to Up-to-date Approved Gene Symbols ``` matrix = uf.map_symbols(matrix, symbol_lookup) matrix.shape ``` ## Merge Duplicate Genes By Rows and Duplicate Columns ``` matrix = uf.merge(matrix, 'row') matrix = uf.merge(matrix, 'column') matrix.shape ``` ## Remove Data that is More Than 95% Missing and Impute Missing Data ``` matrix = uf.remove_impute(matrix) matrix.head() matrix.shape ``` ## Log2 Transform ``` matrix = uf.log2(matrix) matrix.head() ``` ## Normalize Matrix (Quantile Normalize the Matrix by Column) ``` matrix = uf.quantile_normalize(matrix) matrix.head() ``` ## Normalize Matrix (Z-Score the Rows) ``` matrix = uf.zscore(matrix) matrix.head() ``` ## Histogram of First Sample ``` matrix.iloc[:, 0].hist(bins=100) ``` ## Histogram of First Gene ``` matrix.iloc[0, :].hist(bins=100) ``` ## Save Filtered Matrix ``` uf.save_data(matrix, path, output_name + '_matrix_filtered', ext='tsv', compression='gzip') ``` # Analyze Data ## Create Gene List ``` gene_list = uf.gene_list(matrix, geneid_lookup) gene_list.head() gene_list.shape uf.save_data(gene_list, path, output_name + '_gene_list', ext='tsv', compression='gzip', index=False) ``` ## Create Attribute List ``` attribute_list = uf.attribute_list(matrix) attribute_list.head() attribute_list.shape uf.save_data(attribute_list, path, output_name + '_attribute_list', ext='tsv', compression='gzip') ``` ## Create matrix of Standardized values (values between -1, and 1) ``` standard_matrix = uf.standardized_matrix(matrix) standard_matrix.head() uf.save_data(standard_matrix, path, output_name + '_standard_matrix', ext='tsv', compression='gzip') ``` ## Plot of A Single Celltype, Normalized Value vs. 
Standardized Value ``` plt.plot(matrix[matrix.columns[0]], standard_matrix[standard_matrix.columns[0]], 'bo') plt.xlabel('Normalized Values') plt.ylabel('Standardized Values') plt.title(standard_matrix.columns[0]) plt.grid(True) ``` ## Create Ternary Matrix ``` ternary_matrix = uf.ternary_matrix(standard_matrix) ternary_matrix.head() uf.save_data(ternary_matrix, path, output_name + '_ternary_matrix', ext='tsv', compression='gzip') ``` ## Create Gene and Attribute Set Libraries ``` uf.save_setlib(ternary_matrix, 'gene', 'up', path, output_name + '_gene_up_set') uf.save_setlib(ternary_matrix, 'gene', 'down', path, output_name + '_gene_down_set') uf.save_setlib(ternary_matrix, 'attribute', 'up', path, output_name + '_attribute_up_set') uf.save_setlib(ternary_matrix, 'attribute', 'down', path, output_name + '_attribute_down_set') ``` ## Create Attribute Similarity Matrix ``` attribute_similarity_matrix = uf.similarity_matrix(standard_matrix.T, 'cosine') attribute_similarity_matrix.head() uf.save_data(attribute_similarity_matrix, path, output_name + '_attribute_similarity_matrix', compression='npz', symmetric=True, dtype=np.float32) ``` ## Create Gene Similarity Matrix ``` gene_similarity_matrix = uf.similarity_matrix(standard_matrix, 'cosine') gene_similarity_matrix.head() uf.save_data(gene_similarity_matrix, path, output_name + '_gene_similarity_matrix', compression='npz', symmetric=True, dtype=np.float32) ``` ## Create Gene-Attribute Edge List ``` edge_list = uf.edge_list(standard_matrix) uf.save_data(edge_list, path, output_name + '_edge_list', ext='tsv', compression='gzip') ``` # Create Downloadable Save File ``` uf.archive(path) ``` ### Link to download output files: [click here](./output_archive.zip)
github_jupyter
# Deterministic methods ## Point estimates If we just want to find the parameter value that maximizes the posterior probability, we can just use numerical optimization over $p(y \mid \theta)p(\theta)$. The value found is known as the Maximum a Posteriori (or MAP), and is the Bayesian counterpart of the Maximum Likelihood Estimate (MLE). However, a point estimate gives relatively little information and may be highly misleading, and hence we are usually interested in estimating the full posterior distribution. As we have seen, MCMC is one method for estimating the posterior. However, MCMC is relatively slow, and an alternative is to use deterministic Variational Inference (VI) methods which are usually much faster. The trade-off is that VI methods can only find an approximation of the true posterior, and the approximation may not be very good. ## Laplace approximation The basic idea is to use a Gaussian $N(\mu, \Sigma)$ centered at the mode of the log posterior distribution as an approximation. This can be done by first finding the mode using numerical optimization and using that for $\mu$, then estimating the covariance as the inverse of the Hessian at the mode. Note that for a Gaussian, the negative log likelihood basically has the form $a + b + \frac{1}{2}x^T A x$ where $A = \Sigma^{-1}$ and $a, b$ are terms that don't depend on $x$. By differentiating, we get that the Hessian is the inverse covariance matrix. Notes and illustrations in class. ## Entropy $$ H(p) = -\sum_{i} p_i \log(p_i) $$ ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import scipy.stats as stats def entropy(p): """Calculate entropy.""" return -np.sum(p * np.log(p)) for σ in 1, 10, 100: print('N(1, %3d) entropy = %.2f' % (σ, stats.norm(0, σ).entropy())) ``` ## Kullback-Leibler divergence (relative entropy) $$ D_{\text{KL}}(p \vert\vert q) = \sum_i p_i \log \frac{p_i}{q_i} $$ In the usual interpretation, $p$ is the true distribution (e.g. 
posterior probability), and $q$ is an approximation (e.g. prior probability). $D_{\text{KL}}(p \vert\vert q)$ is a measure of how well $q$ approximates $p$, and hence is usually read as the Kullback Leibler divergence from $q$ to $p$. Properties of $D_{\text{KL}}(p \vert\vert q)$ - non-negative (e.g. use Jensen's inequality) $$ D_{\text{KL}}(p \vert\vert q) = \sum_i p_i \log \frac{p_i}{q_i} = -\sum_i p_i \log \frac{q_i}{p_i} \ge -\log\sum_i p_i \frac{q_i}{p_i} = 0 $$ - equal to zero only if $p = q$ almost everywhere - invariant under parameter transforms - Suppose $p(x) = p_1(x) p_2(x)$ and $q(x) = q_1(x) q_2(x)$, then $$D_{\text{KL}}(p \vert\vert q) = D_{\text{KL}}(p_1 \vert\vert q_1) + D_{\text{KL}}(p_2 \vert\vert q_2)$$ Note: - If $p_i = 0$ then $i$ contributes 0 to DKL - If $q_i = 0$ and $p_i = 0$, then $i$ contributes 0 to DKL - If $q_i = 0$ and $p_i \ne 0$, then DKL is undefined. ``` xs = np.random.poisson(5, 1000) ys = np.bincount(xs) ys = ys/ys.sum() plt.stem(ys) pass r = np.arange(len(ys)) r fig, axes = plt.subplots(1,3,figsize=(12,3), sharey=True) for ax, λ in zip(axes, (4, 5,6)): ax.stem(ys) ax.stem(r+0.3, stats.poisson(λ).pmf(r), linefmt='C3-', markerfmt='C3o') ax.set_title('DKL(p, Poisson(%d}) = %.2f' % (λ, stats.entropy(ys, stats.poisson(λ).pmf(r)))) ``` ## Evidence lower bound (ELBO) We want to approximate the posterior distribution $p(\theta \mid y)$ with $q(\theta)$. 
In the usual approach, we want to minimize \begin{array} \\ D_{\text{KL}}(q(\theta) \vert\vert p(\theta | y)) &= \int q(\theta) \log \frac{q(\theta)}{p(\theta \mid y)} \ d\theta \\ &= \int q(\theta) \log \frac{q(\theta)}{p(\theta, y)}p(y) \ d\theta \\ &= \int q(\theta) \left( \log \frac{q(\theta)}{p(\theta, y)} + \log p(y) \right) \ d\theta \\ &= \int q(\theta) \log \frac{q(\theta)}{p(\theta, y)} \ d\theta + \int q(\theta) \log p(y) \ d\theta \\ &= - \int q(\theta) \log \frac{p(\theta, y)}{q(\theta)} \ d\theta + \log p(y) \end{array} Since the Kullback-Leibler divergence $\ge 0$, the marginal likelihood or evidence $p(y) \ge \int q(\theta) \log \frac{p(\theta, y)}{q(\theta)} \ d\theta$ and $\int q(\theta) \log \frac{p(\theta, y)}{q(\theta)} \ d\theta$ is known as the Evidence Lower Bound (ELBO). The ELBO can also be seen as $E_q[\log p(\theta, y)] - E[\log q(\theta)]$, where the second term is the entropy (or differential entropy). Hence if $q(\theta)$ is a family of (simple) distributions with tuning parameters $\lambda$, finding the values of $\lambda$ that maximize the ELBO is equivalent to minimizing $D_{\text{KL}}(q(\theta) \vert\vert p(\theta | y))$. ### Variational Inference with a mean field approximation To estimate $\theta = (\theta_1, \theta_2, \ldots, \theta_n)$, the mean field approximation assumes that $$ q(\theta) = \prod_{i=1}^{n} q(\theta_i) $$ The factorization gives a form of the ELBO that can be solved by numerical optimization. Note that the solution found will usually not be the true posterior, since the mean field approximations assume that the variables are independent. 
![img](https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/0dffe45c7df96e18d3300ce1d0f08d9debed4a38/9-Figure1-1.png) Source: [Variational Inference: A Review for Statisticians](https://arxiv.org/pdf/1601.00670.pdf) ## ADVI The optimization is usually done with gradient information with derivatives found by automatic differentiation, and hence this family of probabilistic inference engines is known as Automatic Differentiation Variational Inference (ADVI). For further reading, see [Variational Inference: A Review for Statisticians](https://arxiv.org/pdf/1601.00670.pdf)
github_jupyter
``` %matplotlib inline import matplotlib.pyplot as plt import fitsio as ft import numpy as np import sys sys.path.append('/Users/mehdi/github/LSSutils') from LSSutils import utils, catalogs nside= 128 data = ft.read(f'/Users/mehdi/Dropbox/LRG_density_maps/heapix_map_lrg_ir_nominal_20191024_clean_combined_{nside}.fits', lower=True) data.dtype.names data.size np.unique(data['region']) des = data[data['region'] == 'des'] decals = data[data['region'] == 'decals'] des.size + decals.size def shiftra(ra): return ra - 360 * (ra>300) for data_i in [des, decals]: print(data_i.size) plt.scatter(shiftra(data_i['ra']), data_i['dec'], 1, c=data_i['density'], vmin=400, vmax=900, cmap=plt.cm.jet_r) plt.colorbar() means = [] fig, ax = plt.subplots() i=0 c=['k', 'r'] for name_i, data_i in zip(['DES', 'DECaLS'],[des, decals]): means.append(np.mean(data_i['density'])) ax.hist(np.sqrt(data_i['density']), density=True, #range=(2, 3.5), #range=(0, 2000), bins=50, cumulative=False, histtype='step', color=c[i]) ax.text(0.8, 0.9-i*0.05, name_i, transform=ax.transAxes, color=c[i]) i += 1 ax.set(xlabel='sqrt(density)', yticks=[])#, yscale='log') means[0]/means[1]-1 from scipy.stats import spearmanr, pearsonr def PCC(xc, yc, kind='spearman'): if not kind in ['pearson', 'spearman']: raise ValueError(f'{kind} not defined') elif kind == 'pearson': func = pearsonr elif kind == 'spearman': func = spearmanr pcc = [] for j in range(xc.shape[1]): pcc.append(func(xc[:,j], yc)[0]) return pcc def BTPCC(xc, yc, num=100): np.random.seed(123456) pcc = [] for _ in range(num): pcc.append(PCC(xc, np.random.permutation(yc))) return pcc list_of_cols = ['ebv', 'galdepth_gmag', 'galdepth_rmag', 'galdepth_zmag', 'psfdepth_w1mag', 'psfsize_g', 'psfsize_r', 'psfsize_z', 'stardens_log'] import pandas as pd def make_pcc_plot(data, list_of_cols=list_of_cols, figax=None, title='PCC'): pd_des = pd.DataFrame(np.array(data).byteswap().newbyteorder()) templates = pd_des[list_of_cols].values pcci = PCC(templates, 
pd_des['density']) pccs = BTPCC(templates, pd_des['density']) prcntiles = np.percentile(pccs, [2, 98], axis=0) x = np.arange(len(prcntiles[0])) if figax is None: fig, ax = plt.subplots() else: fig, ax = figax ax.bar(x, pcci, color='crimson', alpha=0.5) ax.fill_between(x, y1=prcntiles[0], y2=prcntiles[1], alpha=0.2, color='b') ax.set_xticks(x) ax.grid(True, ls=':', color='grey') ax.set_xticklabels(list_of_cols, rotation=90) ax.set(ylabel='PCC') ax.axhline(0, color='k') ax.text(0.1, 0.9, title, transform=ax.transAxes) ``` ranked correlation coef. ``` fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5), sharey=True) make_pcc_plot(des, figax=(fig, ax1), title='DES') make_pcc_plot(decals, figax=(fig, ax2), title='DECaLS') from LSSutils.stats.nnbar import NNBAR help(NNBAR) np.percentile(des['pix_frac'], [0, 2, 50, 98, 100]) nnbar = {} for name_i, data_i in zip(['des', 'decals'], [des, decals]): nnbar_i = {} for template_i in list_of_cols: print(template_i) # NNBAR(galmap, ranmap, mask, sysmap, nbins=20, selection=None, binning='equi-area', Nnbar_i = NNBAR(data_i['density']*data_i['pix_frac'], data_i['pix_frac'], np.ones(data_i['pix_frac'].size, '?'), data_i[template_i], nbins=8, binning='equi-area') Nnbar_i.run(njack=10) nnbar_i[template_i] = Nnbar_i.output nnbar[name_i] = nnbar_i fig, ax = plt.subplots(ncols=3, nrows=3, figsize=(15, 12)) ax = ax.flatten() for i, name_i in enumerate(nnbar.keys()): for j, sys_j in enumerate(list_of_cols): ax[j].errorbar(nnbar[name_i][sys_j]['bin_edges'][:-1], nnbar[name_i][sys_j]['nnbar'], nnbar[name_i][sys_j]['nnbar_err']) ax[j].set(ylim=(0.9, 1.1), xlabel=sys_j) for data_i in [des, decals]: print(data_i.size) plt.scatter(shiftra(data_i['ra']), data_i['dec'], 1, c=data_i['galdepth_rmag'], vmin=24, vmax=25, cmap=plt.cm.jet_r) plt.colorbar(extend='both', label='galdepth_rmag') for name_i,data_i in zip(['DES', 'DECaLS'],[des, decals]): print(data_i.size, np.percentile(data_i['galdepth_rmag'],[0,100])) plt.hist(data_i['galdepth_rmag'], 
histtype='step', density=True, label=name_i, range=(23.4, 25.1), bins=50) plt.legend() plt.xlabel('galdepth_rmag') plt.yticks([]) # plt.colorbar(extend='both', label='galdepth_rmag') from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.feature_selection import RFE decals.size/des.size desdec = np.concatenate([des, decals[np.random.choice(np.arange(decals.size), replace=False, size=des.size)]]) y = (desdec['region'] == 'des').astype('float64') x = [] for l in list_of_cols: if l == 'ebv': print('log of ebv') x.append(np.log10(desdec[l])) else: x.append(desdec[l]) x = np.array(x).T x.shape, y.shape #--- split xtrain, xtest, ytrain, ytest = train_test_split(x, y) #--- scale xmean, xstd = np.mean(xtrain, axis=0), np.std(xtrain, axis=0) xtrain = (xtrain - xmean)/ xstd xtest = (xtest - xmean)/xstd ytrain.size/ytest.size logreg = LogisticRegression(solver='lbfgs') rfe = RFE(logreg, 2) rfe = rfe.fit(xtrain, ytrain) print(rfe.support_) print(rfe.ranking_) for i,si in enumerate(rfe.support_): if si:print(list_of_cols[i]) list_of_cols ypred = rfe.predict(xtest) from sklearn.metrics import confusion_matrix confusion_matrix(ytest, ypred) from sklearn.metrics import classification_report print(classification_report(ytest, ypred)) from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve logit_roc_auc = roc_auc_score(ytest, rfe.predict(xtest)) fpr, tpr, thresholds = roc_curve(ytest, rfe.predict_proba(xtest)[:,1]) plt.figure() plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc) plt.plot([0, 1], [0, 1],'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right") plt.savefig('Log_ROC') plt.show() from sklearn.linear_model import LinearRegression des_decals = (data['region']=='des') | (data['region']=='decals') x = 
np.column_stack([np.log10(data['ebv']), data['galdepth_rmag']])[des_decals] y = data['density'][des_decals] x.shape reg = LinearRegression().fit(x, y) reg.coef_, reg.intercept_ ypred = reg.predict(x) for data_i in [data[des_decals]]: print(data_i.size) plt.scatter(shiftra(data_i['ra']), data_i['dec'], 1, c=ypred, cmap=plt.cm.jet) plt.colorbar() regions = data['region'][des_decals] _=plt.hist(ypred - y, bins=50, density=True) plt.xlabel('model density - input density') plt.yticks([]) np.sqrt(np.mean((ypred - data['density'][des_decals])**2)) np.std(data['density']) regions ypred[regions=='des'] / ypred[regions=='decals'] means = [] fig, ax = plt.subplots() i=0 c=['k', 'r'] for name_i, data_i in zip(['DES', 'DECaLS'],[ypred[regions=='des'] , ypred[regions=='decals']]): means.append(np.mean(data_i)) ax.hist(np.sqrt(data_i), density=True, #range=(2, 3.5), #range=(0, 2000), bins=50, cumulative=False, histtype='step', color=c[i]) ax.text(0.8, 0.9-i*0.05, name_i, transform=ax.transAxes, color=c[i]) i += 1 ax.set(xlabel='sqrt(modeled density)', yticks=[])#, yscale='log') means[0]/means[1]-1 means ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ``` # Predict house prices: regression This file has moved. 
<table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/basic_regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table>
github_jupyter
# Comparing drift detectors We take the image classifier example and use it to compare drift detectors. We will give an opinionated take here. This is not to take shots at the research that enables TorchDrift, but reflects that the typical application in the wild may be dissimilar to the systematic, controlled experimentation in academic papers. We believe that the purpose of TorchDrift is providing tools to do drift detection as well as presenting good practice for practitioners. You are encouraged to study the literature, in particular [S. Rabanser et al: Failing Loudly](https://arxiv.org/abs/1810.11953), and also to do your own experimentation and draw your own conclusions. ``` import IPython import sys sys.path.insert(0, '../') import copy import tqdm import torchvision import functools import torch from typing import Optional, Any import torch import math import pytorch_lightning as pl import torchdrift import sklearn.manifold %matplotlib inline from matplotlib import pyplot torchvision.datasets.utils.download_and_extract_archive('https://download.pytorch.org/tutorial/hymenoptera_data.zip', 'data/') # these are the standard transforms without the normalization (which we move into the model.step/predict before the forward) train_transform = torchvision.transforms.Compose([ torchvision.transforms.RandomResizedCrop(size=(224, 224), scale=(0.08, 1.0), ratio=(0.75, 1.3333)), torchvision.transforms.RandomHorizontalFlip(p=0.5), torchvision.transforms.ToTensor()]) val_transform = torchvision.transforms.Compose([ torchvision.transforms.Resize(size=256), torchvision.transforms.CenterCrop(size=(224, 224)), torchvision.transforms.ToTensor()]) class OurDataModule(pl.LightningDataModule): def __init__(self, parent: Optional['OurDataModule']=None, additional_transform=None): if parent is None: self.train_dataset = torchvision.datasets.ImageFolder('./data/hymenoptera_data/train/', transform=train_transform) self.val_dataset = 
torchvision.datasets.ImageFolder('./data/hymenoptera_data/val/', transform=val_transform) self.test_dataset = torchvision.datasets.ImageFolder('./data/hymenoptera_data/test/', transform=val_transform) self.train_batch_size = 4 self.val_batch_size = 128 self.additional_transform = None else: self.train_dataset = parent.train_dataset self.val_dataset = parent.val_dataset self.test_dataset = parent.test_dataset self.train_batch_size = parent.train_batch_size self.val_batch_size = parent.val_batch_size self.additional_transform = additional_transform if additional_transform is not None: self.additional_transform = additional_transform self.prepare_data() self.setup('fit') self.setup('test') def setup(self, typ): pass def collate_fn(self, batch): batch = torch.utils.data._utils.collate.default_collate(batch) if self.additional_transform: batch = (self.additional_transform(batch[0]), *batch[1:]) return batch def train_dataloader(self): return torch.utils.data.DataLoader(self.train_dataset, batch_size=self.train_batch_size, num_workers=4, shuffle=True, collate_fn=self.collate_fn) def val_dataloader(self): return torch.utils.data.DataLoader(self.val_dataset, batch_size=self.val_batch_size, shuffle=False, collate_fn=self.collate_fn) def test_dataloader(self): return torch.utils.data.DataLoader(self.test_dataset, batch_size=self.val_batch_size, shuffle=False, collate_fn=self.collate_fn) def default_dataloader(self, batch_size=None, num_samples=None, shuffle=True): dataset = self.val_dataset if batch_size is None: batch_size = self.val_batch_size replacement = num_samples is not None if shuffle: sampler = torch.utils.data.RandomSampler(dataset, replacement=replacement, num_samples=num_samples) else: sampler = None return torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler, collate_fn=self.collate_fn) datamodule = OurDataModule() ``` ## Feature extractor We use the TorchVision ResNet18 as the drift detector. 
```
feature_extractor = torch.nn.Sequential(
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
    torchvision.models.resnet18(pretrained=True)
)
# Strip the ResNet classification head so the module outputs features
# instead of 1000 class logits. The ResNet is element 1 of the Sequential;
# element 0 is the Normalize transform, which has no `fc` attribute, so
# the previous `feature_extractor[0].fc = ...` left the head in place.
feature_extractor[1].fc = torch.nn.Identity()
```

## Simulating drifted data

For systematic experiments, we want to compare the output of the drift detector on benign (non-drifted) and drifted, here (partially) out of distribution samples. We simulate out of distribution data by applying a gaussian blur. In reality you might have effects like the camera lens losing focus or dirt impeding the picture quality. Note that we do not use the drifted data for "training" the drift detector, but just for evaluation!

On the technical side, we take our datamodule as the in-distribution datamodule as is and use a derived datamodule which applies the gaussian blur in addition to the usual transforms as the out of distribution datamodule.

```
def corruption_function(x: torch.Tensor):
    return torchdrift.data.functional.gaussian_blur(x, severity=2)

ind_datamodule = datamodule
ood_datamodule = OurDataModule(parent=datamodule, additional_transform=corruption_function)
```

Let us grab a few inputs and show them without and with corruption.

```
inputs, _ = next(iter(datamodule.default_dataloader(shuffle=True)))
inputs_ood = corruption_function(inputs)

N = 6
pyplot.figure(figsize=(15, 5))
for i in range(N):
    for j in range(2):
        pyplot.subplot(2, N, j * N + i + 1)
        if i == 0:
            pyplot.ylabel('vanilla' if j == 0 else 'drifted')
        pyplot.imshow((inputs if j == 0 else inputs_ood)[i].permute(1, 2, 0))
        pyplot.xticks([])
        pyplot.yticks([])
```

## Kernel MMD drift detector

Our first detector is the Kernel MMD drift detector. As you may have guessed from the name, it uses a kernel to define a metric on the space of distributions on the feature-space (see our [note on the intuition behind MMD](./note_on_mmd.ipynb)).
TorchDrift implements a few kernels in the `detectors.mmd` module, the `GaussianKernel` (also known as squared exponential) is the default, `ExpKernel` (aka Laplacian Kernel) and `RationalQuadraticKernel` are also available. In our experiments Kernel MMD worked very well, so we suggest it as a default. ``` drift_detector = torchdrift.detectors.KernelMMDDriftDetector() ``` We use the `utils.DriftDetectionExperiment` class to drive our experiment. It lets us set a ratio of OOD samples in the drifted samples and a sample size. While the statistical tests underpinning the drift detection could also produce p-values, we can also treat the test score as a value that can be thresholded for detection, giving the typical ROC curve. We see that for this setup, the detection power is quite strong. ``` od_model = drift_detector ind_datamodule = datamodule ood_datamodule = OurDataModule(parent=datamodule, additional_transform=corruption_function) ood_ratio = 0.8 sample_size = 10 experiment = torchdrift.utils.DriftDetectionExperiment(od_model, feature_extractor, ood_ratio=ood_ratio, sample_size=sample_size) experiment.post_training(datamodule.train_dataloader()) auc, (fp, tp) = experiment.evaluate(ind_datamodule, ood_datamodule) pyplot.plot(fp, tp) pyplot.title(label=f'{type(od_model).__name__}, $p_{{ood}}$={ood_ratio:.2f}, N={sample_size} AUC={auc:.3f}') pyplot.show() ``` ## Dimension Reduction & Kolmogorov-Smirnov test Next up is the Kolmogorov-Smirnov two sample test. We operationalize it by adding a dimension reduction to two PCA components (the PCA reducer estimates the PCA transform on the reference data during fitting and then applies this fixed transform to the test data). As suggested by _Failing Loudly_, we use the Bonferroni correction and perform the KS test on the marginals. 
``` red = torchdrift.reducers.pca.PCAReducer(n_components=2) detector = torchdrift.detectors.ks.KSDriftDetector() reducer_detector = torch.nn.Sequential(red, detector) ``` Next we run our experiment just like before. This combination usually gives good results, typically a bit less AUC than the Kernel MMD, but typically between just below 0.8 and 0.75. ``` experiment = torchdrift.utils.DriftDetectionExperiment(reducer_detector, feature_extractor, ood_ratio=ood_ratio, sample_size=sample_size) experiment.post_training(datamodule.train_dataloader()) auc, (fp, tp) = experiment.evaluate(ind_datamodule, ood_datamodule) pyplot.plot(fp, tp) pyplot.title(label=f'{detector}, {red}\n$p_{{ood}}$={ood_ratio:.2f}, N={sample_size} AUC={auc:.3f}') pyplot.show() ``` ## Untrained Autoencoder Finally we use the Untrained Autoencoder. This is a bit of a funny name because it really half an autoencoder, so we might as well call it a untrained or randomly initialized feature extractor. This performed reasonably well in _Failing Loudly_, so it appears relatively frequently. In our experiments, this does not work as well as in the ones in _Failing Loudly_. Part of it may be that we have larger images so the feature extractor has "more work to do" and a purely random one does not perform as well. Another part may be that our sample size is lower. We believe that in both of these aspects, our setup is closer to (our) real-world use-cases. Our conclusion here is that the UAE applied to images directly is not as good a choice as working with a pretrained model. Of course, we would not need to see this as a binary decision but could combine a few layers of our trained model to start off with a randomly initialized top if we think that the topmost layers are too specialized on the classification task to be useful as a drift detector. 
``` feature_extractor_red = torch.nn.Sequential( torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), torch.nn.Conv2d(3, 128, kernel_size=5, padding=2, stride=2), torch.nn.ReLU(), torch.nn.Conv2d(128, 256, kernel_size=5, padding=2, stride=2), torch.nn.ReLU(), torch.nn.Conv2d(256, 1024, kernel_size=5, padding=2, stride=2), torch.nn.ReLU(), torch.nn.AdaptiveMaxPool2d(8), torch.nn.Flatten(), torch.nn.Linear(1024*8*8, 32) ).cuda().eval() for m in feature_extractor_red: if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)): torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu') torch.nn.init.zeros_(m.bias) detector = torchdrift.detectors.ks.KSDriftDetector() experiment = torchdrift.utils.DriftDetectionExperiment(detector, feature_extractor_red, ood_ratio=ood_ratio, sample_size=sample_size) experiment.post_training(datamodule.train_dataloader()) auc, (fp, tp) = experiment.evaluate(ind_datamodule, ood_datamodule, num_runs=100) pyplot.plot(fp, tp) pyplot.title(label=f'{detector}, UAE, $p_{{ood}}$={ood_ratio:.2f}, N={sample_size} AUC={auc:.3f}') pyplot.show() ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt data = pd.read_csv('TSLA.csv') data.head() plt.figure(figsize=(12,8)) plt.plot(data['Open'], color='blue', label='Tesla Open Stock Price') plt.title('Tesla Stock Market Open Price vs Time') plt.xlabel('Date') plt.ylabel('Tesla Stock Price') plt.legend() plt.show() data['Date'] = pd.to_datetime(data['Date']) data.head() X = np.array(data['Open']) X = X.reshape(X.shape[0],1) X.shape from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range = (0, 1)) X = scaler.fit_transform(X) train = X[:2000] test = X[2000:] print(train.shape,'\n',test.shape) #Create X_train using 60 timesteps for each sample and 1 output X_train = [] y_train = [] for i in range(60, train.shape[0]): X_train.append(train[i-60:i, 0]) y_train.append(train[i, 0]) X_train, y_train = np.array(X_train), np.array(y_train) print(X_train.shape,'\n',y_train.shape) # Reshaping X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1)) X_train.shape pip install tensorflow==1.2.0 --ignore-installed conda install -c conda-forge tensorflow # Building the RNN # Importing the Keras libraries and packages from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from tensorflow.keras.layers import Dropout from tensorflow.keras import backend #Initialising the RNN regressor = Sequential() # Adding the first LSTM layer and some Dropout regularisation regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1))) regressor.add(Dropout(0.2)) # Adding a second LSTM layer and some Dropout regularisation regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) # Adding a third LSTM layer and some Dropout regularisation regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) # Adding a fourth LSTM layer and some Dropout regularisation regressor.add(LSTM(units 
= 50)) regressor.add(Dropout(0.2)) # Adding the output layer regressor.add(Dense(units = 1)) #compling the RNN regressor.compile(optimizer = 'adam', loss = 'mean_squared_error') regressor.fit(X_train, y_train, epochs = 100, batch_size = 32) # Create X_test using 60 timesteps for each sample X_test = [] y_test = [] for i in range(60, test.shape[0]): X_test.append(test[i-60:i, 0]) y_test.append(test[i, 0]) X_test, y_test = np.array(X_test), np.array(y_test) print(X_test.shape) X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) print(X_train.shape) # plot predictions vs real turnover on training set plt.figure(figsize=(15,10)) predicted = regressor.predict(X_train) predicted = scaler.inverse_transform(predicted) plt.plot(scaler.inverse_transform(train[-1940:]), color = 'red', label = 'Open Price') plt.plot(predicted, color = 'green', label = 'Predicted Open Price') plt.title('Tesla Stock Market Open Price vs Time') plt.xlabel('Time') plt.ylabel('Open Price') plt.legend() plt.show() X_test.shape # plotting predictions vs true turnover for the test set plt.figure(figsize=(15,10)) predicted = regressor.predict(X_test) predicted = scaler.inverse_transform(predicted) plt.plot(scaler.inverse_transform(test[-356:]), color = 'red', label = 'Open Price') plt.plot(predicted, color = 'green', label = 'Predicted Open Price') plt.title('Tesla Stock Market Open Price vs Time') plt.xlabel('Time') plt.ylabel('Open Price') plt.legend() plt.show() ```
github_jupyter
# Name Gather training data by querying BigQuery # Labels GCP, BigQuery, Kubeflow, Pipeline # Summary A Kubeflow Pipeline component to submit a query to BigQuery and store the result in a Cloud Storage bucket. # Details ## Intended use Use this Kubeflow component to: * Select training data by submitting a query to BigQuery. * Output the training data into a Cloud Storage bucket as CSV files. ## Runtime arguments: | Argument | Description | Optional | Data type | Accepted values | Default | |----------|-------------|----------|-----------|-----------------|---------| | query | The query used by BigQuery to fetch the results. | No | String | | | | project_id | The project ID of the Google Cloud Platform (GCP) project to use to execute the query. | No | GCPProjectID | | | | dataset_id | The ID of the persistent BigQuery dataset to store the results of the query. If the dataset does not exist, the operation will create a new one. | Yes | String | | None | | table_id | The ID of the BigQuery table to store the results of the query. If the table ID is absent, the operation will generate a random ID for the table. | Yes | String | | None | | output_gcs_path | The path to the Cloud Storage bucket to store the query output. | Yes | GCSPath | | None | | dataset_location | The location where the dataset is created. Defaults to US. | Yes | String | | US | | job_config | The full configuration specification for the query job. See [QueryJobConfig](https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html#google.cloud.bigquery.job.QueryJobConfig) for details. | Yes | Dict | A JSONobject which has the same structure as [QueryJobConfig](https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html#google.cloud.bigquery.job.QueryJobConfig) | None | ## Input data schema The input data is a BigQuery job containing a query that pulls data f rom various sources. 
## Output: Name | Description | Type :--- | :---------- | :--- output_gcs_path | The path to the Cloud Storage bucket containing the query output in CSV format. | GCSPath ## Cautions & requirements To use the component, the following requirements must be met: * The BigQuery API is enabled. * The component is running under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow Pipeline cluster. For example: ``` bigquery_query_op(...).apply(gcp.use_gcp_secret('user-gcp-sa')) ``` * The Kubeflow user service account is a member of the `roles/bigquery.admin` role of the project. * The Kubeflow user service account is a member of the `roles/storage.objectCreator `role of the Cloud Storage output bucket. ## Detailed description This Kubeflow Pipeline component is used to: * Submit a query to BigQuery. * The query results are persisted in a dataset table in BigQuery. * An extract job is created in BigQuery to extract the data from the dataset table and output it to a Cloud Storage bucket as CSV files. Use the code below as an example of how to run your BigQuery job. ### Sample Note: The following sample code works in an IPython notebook or directly in Python code. #### Set sample parameters ``` %%capture --no-stderr KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz' !pip3 install $KFP_PACKAGE --upgrade ``` 2. Load the component using KFP SDK ``` import kfp.components as comp bigquery_query_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/bigquery/query/component.yaml') help(bigquery_query_op) ``` ### Sample Note: The following sample code works in IPython notebook or directly in Python code. In this sample, we send a query to get the top questions from stackdriver public data and output the data to a Cloud Storage bucket. 
Here is the query: ``` QUERY = 'SELECT * FROM `bigquery-public-data.stackoverflow.posts_questions` LIMIT 10' ``` #### Set sample parameters ``` # Required Parameters PROJECT_ID = '<Please put your project ID here>' GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash # Optional Parameters EXPERIMENT_NAME = 'Bigquery -Query' OUTPUT_PATH = '{}/bigquery/query/questions.csv'.format(GCS_WORKING_DIR) ``` #### Run the component as a single pipeline ``` import kfp.dsl as dsl import kfp.gcp as gcp import json @dsl.pipeline( name='Bigquery query pipeline', description='Bigquery query pipeline' ) def pipeline( query=QUERY, project_id = PROJECT_ID, dataset_id='', table_id='', output_gcs_path=OUTPUT_PATH, dataset_location='US', job_config='' ): bigquery_query_op( query=query, project_id=project_id, dataset_id=dataset_id, table_id=table_id, output_gcs_path=output_gcs_path, dataset_location=dataset_location, job_config=job_config).apply(gcp.use_gcp_secret('user-gcp-sa')) ``` #### Compile the pipeline ``` pipeline_func = pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename) ``` #### Submit the pipeline for execution ``` #Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) ``` #### Inspect the output ``` !gsutil cat OUTPUT_PATH ``` ## References * [Component python code](https://github.com/kubeflow/pipelines/blob/master/component_sdk/python/kfp_component/google/bigquery/_query.py) * [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile) * [Sample 
notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/bigquery/query/sample.ipynb) * [BigQuery query REST API](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) ## License By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
github_jupyter
``` import pandas as pd #Loading data from Github repository filename = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter16/Dataset/processed.cleveland.data' # Loading the data using pandas heartData = pd.read_csv(filename,sep=",",header = None,na_values = "?") heartData.head() heartData.columns = ['age','sex', 'cp', 'trestbps','chol','fbs','restecg','thalach','exang','oldpeak','slope','ca','thal','label'] heartData.head() # Changing the Classes to 1 & 0 heartData.loc[heartData['label'] > 0 , 'label'] = 1 heartData.head() # Dropping all the rows with na values newheart = heartData.dropna(axis = 0) newheart.shape # Seperating X and y variables y = newheart.pop('label') y.shape X = newheart X.head() from sklearn.model_selection import train_test_split # Splitting the data into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123) ``` **Creating processing Engine** ``` from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler numeric_transformer = Pipeline(steps=[('scaler', StandardScaler())]) numeric_features = X.select_dtypes(include=['int64', 'float64']).columns from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, numeric_features)]) ``` **Spot checking different models** ``` # Importing necessary libraries from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier # Creating a list of the classifiers classifiers = [ KNeighborsClassifier(), RandomForestClassifier(random_state=123), AdaBoostClassifier(random_state=123), LogisticRegression(random_state=123) ] # Looping through classifiers to get the best model for classifier in classifiers: estimator = Pipeline(steps=[('preprocessor', preprocessor), ('dimred', PCA(10)), 
('classifier',classifier)]) estimator.fit(X_train, y_train) print(classifier) print("model score: %.2f" % estimator.score(X_test, y_test)) ``` **Grid Search** ``` # Creating a pipeline with Logistic Regression pipe = Pipeline(steps=[('preprocessor', preprocessor), ('dimred', PCA()), ('classifier',LogisticRegression(random_state=123))]) param_grid = {'dimred__n_components':[10,11,12,13],'classifier__penalty' : ['l1', 'l2'],'classifier__C' : [1,3, 5],'classifier__solver' : ['liblinear']} from sklearn.model_selection import GridSearchCV # Fitting the grid search estimator = GridSearchCV(pipe, cv=10, param_grid=param_grid) # Fitting the estimator on the training set estimator.fit(X_train,y_train) # Printing the best score and best parameters print("Best: %f using %s" % (estimator.best_score_, estimator.best_params_)) # Predicting with the best estimator pred = estimator.predict(X_test) # Printing the classification report (signature is classification_report(y_true, y_pred)) from sklearn.metrics import classification_report print(classification_report(y_test, pred)) ```
github_jupyter
## Import ``` # Matplotlib import matplotlib.pyplot as plt # Tensorflow import tensorflow as tf # Numpy and Pandas import numpy as np import pandas as pd # Other imports import sys from sklearn.preprocessing import StandardScaler ``` ## Be sure to use Tensorflow 2.0 ``` assert hasattr(tf, "function") # Be sure to use tensorflow 2.0 ``` ## Load the dataset: Fashion MNIST ![](https://storage.googleapis.com/kaggle-datasets-images/2243/3791/9384af51de8baa77f6320901f53bd26b/dataset-cover.png) ``` from sklearn.model_selection import train_test_split # Fashion MNIST fashion_mnist = tf.keras.datasets.fashion_mnist (images, targets), (_, _) = fashion_mnist.load_data() # Get only a subpart of the dataset images = images[:10000] targets = targets [:10000] images = images.reshape(-1, 784) images = images.astype(float) scaler = StandardScaler() images = scaler.fit_transform(images) images_train, images_test, targets_train, targets_test = train_test_split(images, targets, test_size=0.2, random_state=1) print(images_train.shape, targets_train.shape) print(images_test.shape, targets_test.shape) ```
targets[0:1]) ``` ## Model Summary ``` model.summary() ``` ## Compile the model ``` # Compile the model model.compile( loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"] ) ``` ## Train the model ``` history = model.fit(images_train, targets_train, epochs=50, validation_split=0.2) loss_curve = history.history["loss"] acc_curve = history.history["accuracy"] loss_val_curve = history.history["val_loss"] acc_val_curve = history.history["val_accuracy"] plt.plot(loss_curve, label="Train") plt.plot(loss_val_curve, label="Val") plt.legend(loc='upper left') plt.title("Loss") plt.show() plt.plot(acc_curve, label="Train") plt.plot(acc_val_curve, label="Val") plt.legend(loc='upper left') plt.title("Accuracy") plt.show() loss, acc = model.evaluate(images_test, targets_test) print("Test loss", loss) print("Test accuracy", acc) ```
github_jupyter
# Amazon Augmented AI (Amazon A2I) integration with Tabular Data [Example] 1. [Introduction](#Introduction) 2. [Prerequisites](#Prerequisites) 1. [Workteam](#Workteam) 2. [Permissions](#Notebook-Permission) 3. [Client Setup](#Client-Setup) 4. [Create Control Plane Resources](#Create-Control-Plane-Resources) 1. [Create Human Task UI](#Create-Human-Task-UI) 2. [Create Flow Definition](#Create-Flow-Definition) 5. [Starting Human Loops](#Scenario-1-:-When-Activation-Conditions-are-met-,-and-HumanLoop-is-created) 1. [Wait For Workers to Complete Task](#Wait-For-Workers-to-Complete-Task) 2. [Check Status of Human Loop](#Check-Status-of-Human-Loop) 3. [View Task Results](#View-Task-Results) ## Introduction Amazon Augmented AI (Amazon A2I) makes it easy to build the workflows required for human review of ML predictions. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers. You can create your own workflows for ML models built on Amazon SageMaker or any other tools. Using Amazon A2I, you can allow human reviewers to step in when a model is unable to make a high confidence prediction or to audit its predictions on an on-going basis. Learn more here: https://aws.amazon.com/augmented-ai/ In this tutorial, we will show how you can use **Amazon A2I with Tabular data.** Tabular data is the most common form of data used by data scientists today for generating models. Use cases include, fraud detection, building customer propensity models, forecasting sales using regression etc. In many cases, data scientists often convert unstructured data such as text or images into structured tables that they then use for training models. Here we will first train a model and use the outputs of the trained model to build a human loop for review. 
For more in depth instructions, visit https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-getting-started.html To incorporate Amazon A2I into your human review workflows, you need three resources: * A **worker task template** to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-instructions-overview.html * A **human review workflow**, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. You can create a flow definition in the Amazon Augmented AI console or with Amazon A2I APIs. To learn more about both of these options, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html * A **human loop** to start your human review workflow. When you use one of the built-in task types, the corresponding AWS service creates and starts a human loop on your behalf when the conditions specified in your flow definition are met or for each object if no conditions were specified. When a human loop is triggered, human review tasks are sent to the workers as specified in the flow definition. When using a custom task type, as this tutorial will show, you start a human loop using the Amazon Augmented AI Runtime API. When you call `start_human_loop()` in your custom application, a task is sent to human reviewers. ### Install Latest SDK ``` # First, let's get the latest installations of our dependencies !pip install --upgrade pip !pip install boto3 --upgrade !pip install -U botocore ``` ## Setup We need to set up the following data: * `region` - Region to call A2I. 
* `BUCKET` - A S3 bucket accessible by the given role * Used to store the sample images & output results * Must be within the same region A2I is called from * `role` - The IAM role used as part of StartHumanLoop. By default, this notebook will use the execution role * `workteam` - Group of people to send the work to ### Role and Permissions The AWS IAM Role used to execute the notebook needs to have the following permissions: * SagemakerFullAccess * AmazonSageMakerMechanicalTurkAccess (if using MechanicalTurk as your Workforce) ``` from sagemaker import get_execution_role import sagemaker # Setting Role to the default SageMaker Execution Role role = get_execution_role() display(role) import os import boto3 import botocore sess = sagemaker.Session() #bucket BUCKET = sess.default_bucket() # or use a custom bucket if you created one. PREFIX = 'a2i-data' #specify output path for artifacts OUTPUT_PATH = f's3://{BUCKET}/a2i-results' # Region region = boto3.session.Session().region_name print(region) ``` ## Tabular data with Amazon SageMaker Before creating the template, we will load a tabular dataset, split the data into train and test, store the test data in Amazon S3, and train a machine learning model. The dataset we use is on Breast Cancer prediction and can be found here: [1] Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. Based on the input features, we will first train a model to detect a benign or malignant label. Once the model is trained, we will create an endpoint, and generate some model predictions. We will then create a WorkerUI to load in our immutable test dataset as a table, and dynamically modify the verify and change predictions if needed. 
``` import pandas as pd from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split def generatedf(split_ratio): """Loads the dataset into a dataframe and generates train/test splits""" data = load_breast_cancer() df = pd.DataFrame(data.data, columns = data.feature_names) df['label'] = data.target cols = list(df.columns) cols = cols[-1:] + cols[:-1] df = df[cols] train, test = train_test_split(df, test_size=split_ratio, random_state=42) return train, test train_data, test_data = generatedf(0.2) train_data.head() #store the datasets locally train_data.to_csv('train.csv',index = None, header=None) test_data.to_csv('test.csv', index = None, header=None) # load the data into S3 sess.upload_data('train.csv', bucket=BUCKET, key_prefix=os.path.join(PREFIX, 'train')) sess.upload_data('test.csv', bucket=BUCKET, key_prefix=os.path.join(PREFIX, 'test')) #load the train and test data filenames from Amazon S3 s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(BUCKET, PREFIX), content_type='csv') s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/test/'.format(BUCKET, PREFIX), content_type='csv') ``` ### Train and Deploy the model SageMaker will set up the instance types needed and copy the data over to train the model. This may take about **3** minutes to complete training. Once the model is trained, we will deploy the model as an endpoint. Again, SageMaker will set up the instance required, copy the inference image and the inference code and create a HTTPS endpoint. This may take **4-5** minutes. 
For more details on how SageMaker creates an endpoint, visit: https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html ``` from sagemaker.amazon.amazon_estimator import get_image_uri container = get_image_uri(region, 'xgboost', '0.90-1') xgb = sagemaker.estimator.Estimator(container, role, train_instance_count=1, train_instance_type='ml.m5.xlarge', output_path=OUTPUT_PATH, sagemaker_session=sess) xgb.set_hyperparameters(max_depth=2, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, silent=0, objective='binary:logistic', num_round=100, eval_metric='auc') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) xgb_predictor = xgb.deploy(initial_instance_count = 1, instance_type = 'ml.m5.xlarge') from sagemaker.predictor import csv_serializer xgb_predictor.content_type = 'text/csv' xgb_predictor.serializer = csv_serializer xgb_predictor.deserializer = None ## Lets now run predictions on our test set and use it to create a table containing our outputs. import numpy as np def predict(data, model, rows=500): split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1)) predictions = '' for array in split_array: predictions = ','.join([predictions, model.predict(array).decode('utf-8')]) return np.round(np.fromstring(predictions[1:], sep=',')) ## Generate predictions on the test set for the difference models predictions = predict(test_data[list(test_data.columns)[1:]].values, xgb_predictor) predictions ``` ### Creating human review Workteam or Workforce A workforce is the group of workers that you have selected to label your dataset. You can choose either the Amazon Mechanical Turk workforce, a vendor-managed workforce, or you can create your own private workforce for human reviews. Whichever workforce type you choose, Amazon Augmented AI takes care of sending tasks to workers. 
When you use a private workforce, you also create work teams, a group of workers from your workforce that are assigned to Amazon Augmented AI human review tasks. You can have multiple work teams and can assign one or more work teams to each job. To create your Workteam, visit the instructions here: https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management.html After you have created your workteam, replace YOUR_WORKTEAM_ARN below ``` WORKTEAM_ARN = 'arn:aws:sagemaker:us-east-2:{account_num}:workteam/private-crowd/stefan-team'#'YOUR_WORKTEAM_ARN' ``` Visit: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-permissions-security.html to add the necessary permissions to your role ### Client Setup Here we are going to setup the rest of our clients. ``` import io import uuid import time timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) # Amazon SageMaker client sagemaker_client = boto3.client('sagemaker', region) # Amazon Augment AI (A2I) client a2i = boto3.client('sagemaker-a2i-runtime') # Amazon S3 client s3 = boto3.client('s3', region) # Flow definition name - this value is unique per account and region. You can also provide your own value here. flowDefinitionName = 'fd-sagemaker-tabular-data-demo-' + timestamp # Task UI name - this value is unique per account and region. You can also provide your own value here. taskUIName = 'ui-sagemaker-tabular-data-demo-' + timestamp ``` ## Create Control Plane Resources ### Create Human Task UI Create a human task UI resource, giving a UI template in liquid html. This template will be rendered to the human workers whenever human loop is required. For over 70 pre built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis. 
We will use the following template to render both the test dataset, as well as the model predictions ``` template = r""" <script src="https://assets.crowd.aws/crowd-html-elements.js"></script> <style> table, tr, th, td { border: 1px solid black; border-collapse: collapse; padding: 5px; } </style> <crowd-form> <div> <h1>Instructions</h1> <p>Please review the predictions in the Predictions table based on the input data table below, and make corrections where appropriate. </p> <p> Here are the labels: </p> <p> 0: Benign </p> <p> 1: Malignant </p> </div> <div> <h3> Breast cancer dataset </h3> <div id="my_table"> {{ task.input.table | skip_autoescape }} </div> </div> <br> <h1> Predictions Table </h1> <table> <tr> <th>ROW NUMBER</th> <th>MODEL PREDICTION</th> <th>AGREE/DISAGREE WITH ML RATING?</th> <th>YOUR PREDICTION</th> <th>CHANGE REASON </th> </tr> {% for pair in task.input.Pairs %} <tr> <td>{{ pair.row }}</td> <td><crowd-text-area name="predicted{{ forloop.index }}" value="{{ pair.prediction }}"></crowd-text-area></td> <td> <p> <input type="radio" id="agree{{ forloop.index }}" name="rating{{ forloop.index }}" value="agree" required> <label for="agree{{ forloop.index }}">Agree</label> </p> <p> <input type="radio" id="disagree{{ forloop.index }}" name="rating{{ forloop.index }}" value="disagree" required> <label for="disagree{{ forloop.index }}">Disagree</label> </p> </td> <td> <p> <input type="text" name="True Prediction" placeholder="Enter your Prediction" /> </p> </td> <td> <p> <input type="text" name="Change Reason" placeholder="Explain why you changed the prediction" /> </p> </td> </tr> {% endfor %} </table> </crowd-form> """ def create_task_ui(): ''' Creates a Human Task UI resource. 
Returns: struct: HumanTaskUiArn ''' response = sagemaker_client.create_human_task_ui( HumanTaskUiName=taskUIName, UiTemplate={'Content': template}) return response # Create task UI humanTaskUiResponse = create_task_ui() humanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn'] print(humanTaskUiArn) ``` ### Create the Flow Definition In this section, we're going to create a flow definition definition. Flow Definitions allow us to specify: * The workforce that your tasks will be sent to. * The instructions that your workforce will receive. This is called a worker task template. * The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks. * Where your output data will be stored. This demo is going to use the API, but you can optionally create this workflow definition in the console as well. For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html. ``` create_workflow_definition_response = sagemaker_client.create_flow_definition( FlowDefinitionName= flowDefinitionName, RoleArn= role, HumanLoopConfig= { "WorkteamArn": WORKTEAM_ARN, "HumanTaskUiArn": humanTaskUiArn, "TaskCount": 1, "TaskDescription": "Make sure the labels are correct", "TaskTitle": "tabular data a2i demo" }, OutputConfig={ "S3OutputPath" : OUTPUT_PATH } ) flowDefinitionArn = create_workflow_definition_response['FlowDefinitionArn'] # let's save this ARN for future use # Describe flow definition - status should be active for x in range(60): describeFlowDefinitionResponse = sagemaker_client.describe_flow_definition(FlowDefinitionName=flowDefinitionName) print(describeFlowDefinitionResponse['FlowDefinitionStatus']) if (describeFlowDefinitionResponse['FlowDefinitionStatus'] == 'Active'): print("Flow Definition is active") break time.sleep(2) ``` ## Human Loops Now that we have setup our Flow Definition, we are ready to start the human loop to have the reviewers asynchronously review 
the outputs generated by our model. First we need to create a dictionary containing our model outputs, so we can load it dynamically ``` item_list = [{'row': "ROW_{}".format(x), 'prediction': predictions[x]} for x in range(5)] item_list ip_content = {"table": test_data.reset_index().drop(columns = ['index', 'label']).head().to_html(), 'Pairs': item_list } import json humanLoopName = str(uuid.uuid4()) start_loop_response = a2i.start_human_loop( HumanLoopName=humanLoopName, FlowDefinitionArn=flowDefinitionArn, HumanLoopInput={ "InputContent": json.dumps(ip_content) } ) ``` ### Check Status of Human Loop ``` # Initialize the list that collects finished human loops before appending to it completed_human_loops = [] resp = a2i.describe_human_loop(HumanLoopName=humanLoopName) print(f'HumanLoop Name: {humanLoopName}') print(f'HumanLoop Status: {resp["HumanLoopStatus"]}') print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}') print('\n') if resp["HumanLoopStatus"] == "Completed": completed_human_loops.append(resp) ``` ### Wait For Workers to Complete Task Since we are using a private workteam, we should go to the labeling UI to perform the inspection ourselves. ``` workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:] print("Navigate to the private worker portal and do the tasks. 
Make sure you've invited yourself to your workteam!") print('https://' + sagemaker_client.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain']) ``` ### Check Status of Human Loop Again ``` resp = a2i.describe_human_loop(HumanLoopName=humanLoopName) print(f'HumanLoop Name: {humanLoopName}') print(f'HumanLoop Status: {resp["HumanLoopStatus"]}') print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}') print('\n') if resp["HumanLoopStatus"] == "Completed": completed_human_loops.append(resp) ``` ### View Task Results ``` import re import pprint pp = pprint.PrettyPrinter(indent=4) for resp in completed_human_loops: splitted_string = re.split('s3://' + BUCKET + '/', resp['HumanLoopOutput']['OutputS3Uri']) output_bucket_key = splitted_string[1] response = s3.get_object(Bucket=BUCKET, Key=output_bucket_key) content = response["Body"].read() json_output = json.loads(content) pp.pprint(json_output) print('\n') ``` ### Delete Resources ``` a2i.stop_human_loop(HumanLoopName=humanLoopName) a2i.delete_human_loop(HumanLoopName=humanLoopName) xgb_predictor.delete_endpoint() ```
github_jupyter
<a href="https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/00_prerequisites/01_intermediate_python/00-objects.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #### Copyright 2019 Google LLC. ``` # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Intermediate Python - Objects At this point in your Python journey, you should be familiar with the following concepts and when to use them. - different data types - string - number - list - tuple - dictionary - printing - `for` and `while` loops - `if`/`else` statements - functions - code commenting In this lab, we will move into the more advanced concept of objects. You may have heard of object-oriented programming, especially in other languages. If not, don't worry. This will be a gentle introduction that will give you the skills you need to know in order to build your own objects in Python. ## Objects ### Introduction It is likely that you have seen programs written in a procedural programming style. These programs consist of procedures (also called functions and methods) that operate on data passed to them. Imagine that you had a function `compute_paycheck` that computed the weekly paycheck for a worker. If you wanted to compute the paycheck of a given employee in a procedural style, you would pass the necessary data to compute the pay to the `compute_paycheck` function. 
``` employee_data = get_employee_data() pay = compute_paycheck(employee_data) ``` Though you *could* write something like this in Python, it isn't necessarily idiomatic to the language. What this means is that Python tends to work better and look better when you use **object-oriented programming**. Python is an object-oriented language. This means that your program can be modeled as logical objects with methods built in to the object to operate on data. In an object-oriented programming style, you could encode each employee as its own object, and write a method called `compute_paycheck` which returns the weekly paycheck for a given employee. In that case, computing an employee's paycheck would look more like the following: ``` employee_data = get_employee_data() pay = employee_data.compute_paycheck() ``` In this case, `compute_paycheck` is a **method** that is bound to the returned argument `employee_data`, and can be called directly on this type. A method is just a function that is tied to an object. However, the terms "function" and "method" are often used interchangeably. See [here](https://stackoverflow.com/questions/155609/whats-the-difference-between-a-method-and-a-function) for a more in-depth discussion. Using object-oriented programming does not mean that you can't pass data to functions/methods. Imagine that the employee data only contained information like hourly wage and tax holdouts. In this case, `compute_paycheck` would need to know the number of hours worked in order to calculate the employee's pay. ``` employee_data = get_employee_data() hours_worked = get_hours_worked() pay = employee_data.compute_paycheck(hours_worked) ``` In the example above, you can see the procedural and object-oriented styles mixed together in the same block. (The `hours_worked` variable is computed using the `get_hours_worked` function, and the `employee_data` variable is computed using the `get_employee_data` function.) 
However, even these variables could be computed in an object-oriented style. For example, `hours_worked` could come from an object representing the time clock, and `employee_data` could come from an object representing the HR system. ``` employee_data = hr.get_employee_data() hours_worked = timeclock.get_hours_worked() employee_data.compute_pay(hours_worked) ``` In Python, everything is an object. The code below uses the inbuilt `isinstance` function to check if each item is an instance of an `object`. ``` for data in ( 1, # integer 3.5, # float "Hello Python", # string (1, "funny", "tuple"), # tuple ["a", "list"], # list {"and": "a", "dict": 2} # dictionary ): print("Is {} an object? {}".format(type(data), isinstance(data, object))) ``` You can create your own object using the `class` keyword. ``` class Cow: pass ``` Why did we use the keyword `class` and not `object`? You can think of the class as a template for the object, and the object itself as an instance of the class. To create an object from a class, you use parentheses to instantiate the class. ``` # Create an instance of Cow called elsie elsie = Cow() # Create an instance of Cow called annabelle annabelle = Cow() print(Cow) print(elsie) print(annabelle) ``` Notice that `Cow` is a `class` and that `elsie` and `annabelle` are Cow objects. The text following `at` indicates where in memory these objects are stored. You might have to look closely, but `elsie` and `annabelle` are located at different locations in memory. Adding methods to a class is easy. You simply create a function, but have it indented so that it is inside the class. ``` class Cow: def talk(): print("Moo") ``` You can then call the method directly on the class. ``` Cow.talk() ``` While you can call `talk()` on the `Cow` class, you can't actually call `talk()` on any instances of `Cow`, such as `elsie` and `annabelle`. In order to make Elsie and Annabelle talk, we need to pass the `self` keyword to the `talk` method. 
In general, all object functions should pass **`self`** as the first parameter. Let's modify the `Cow` class to make `talk` an object (also known as instance) function instead of a class function. ``` class Cow: def talk(self): print("Moo") elsie = Cow() elsie.talk() ``` Now `talk` can be called on objects of type `Cow`, but not on the `Cow` class itself. You can add as many methods as you want to a class. ``` class Cow: def talk(self): print("Moo") def eat(self): print("Crunch") elsie = Cow() elsie.eat() elsie.talk() ``` ### Initialization There are special functions that you can define in a class. These functions do things like initialize an object, convert an object to a string, determine the length of an object, and more. These special functions all start and end with double-underscores. The most common of these is `__init__`. `__init__` initializes the class. Let's add an initializer to our `Cow` class. ``` class Cow: def __init__(self, name): self.__name = name def talk(self): print("{} says Moo".format(self.__name)) annie = Cow("Annabelle") annie.talk() elly = Cow("Elsie") elly.talk() ``` There are a few new concepts in the code above. 1. `__init__` You can see that `__init__` is passed the object itself, commonly referred to as **self**. `__init__` can also accept any number of other arguments. In this case, we want the name of the cow. We save that name in the object (represented by `self`), and also use it in the `talk` method. 2. `__name` Notice that the instance variable `__name` has two underscores before it. This naming is a way to tell Python to hide the variable from the rest of the program, so that it is only accessible to other methods within the object. This data hiding provides [**encapsulation**](https://en.wikipedia.org/wiki/Encapsulation_(computer_programming)) which is an important concept in object-oriented programming. 
Had `__name` been called `name` or `_name` (single-underscore), it would not be hidden, and could then be accessed on the object (eg. `annie.name`). There are many different double-underscore (dunder) methods. They are all documented in the [official Python documentation](https://docs.python.org/3/reference/datamodel.html#special-method-names). ### Inheritance Python objects are able to inherit functionality from other Python objects. Let's look at an example. ``` class Animal: def talk(self): print("...") # The sound of silence def eat(self): print("crunch") class Cow(Animal): def talk(self): print("Moo") class Worm(Animal): pass cow = Cow() worm = Worm() cow.talk() cow.eat() worm.talk() worm.eat() ``` In the code above, we create an `Animal` class that has a generic implementation of the `talk` and `eat` functions that we created earlier. We then create a `Cow` object that implements its own `talk` function but relies on the `Animal`'s `eat` function. We also create a `Worm` class that fully relies on `Animal` to provide `talk` and `eat` functions. The reason this is so useful is that we can scaffold classes to inherit base features. For example, we might want different base classes `Plant` and `Animal` that represent generic plants and animals respectively. Then, we could create different plants such as `Cactus` and `Sunflower` inheriting from the `Plant` class, and different animals such as `Cow` and `Worm`. Python also supports multiple inheritance and many layers of inheritance. In the code below, `move` and `eat` are methods of the base class `Animal`, which are then inherited by different types of animals. 
``` class Animal: def move(self): pass def eat(self): pass class Legless(Animal): def move(self): print("Wriggle wriggle") class Legged(Animal): def move(self): print("Trot trot trot") class Toothless(Animal): def eat(self): print("Slurp") class Toothed(Animal): def eat(self): print("Chomp") class Worm(Legless, Toothless): pass class Cow(Legged, Toothed): pass class Rock: pass def live(animal): if isinstance(animal, Animal): animal.move() animal.eat() w = Worm() c = Cow() r = Rock() print("The worm goes...") live(w) print("The cow goes...") live(c) print("The rock goes...") live(r) ``` # Exercises ## Exercise 1 In the code block below, create a `Cow` class that has an `__init__` method that accepts a name and breed so that a cow can be created like: ``` elsie = Cow("Elsie", "Jersey") ``` Name the class variables **name** and **breed**. Make sure that if the name and breed of cow passed to the constructor are changed, the values stored in the instance variables reflect the different names. In other words, don't hard-code "Elsie" and "Jersey". ### Student Solution ``` # Your code goes here ``` --- ## Exercise 2 Take the `Cow` class that you implemented in exercise one, and add a double-underscore method so that if you create a cow using: ``` cow = Cow("Elsie", "Shorthorn") ``` Calling `print(cow)` prints: > Elsie is a Shorthorn cow. Hint: you may want to look through the [Python documentation on special method names](https://docs.python.org/3/reference/datamodel.html#special-method-names) to find the dunder method that dictates a string representation of the object. ### Student Solution ``` # Your code goes here ``` --- ## Exercise 3 Take the `Cow` class that you implemented in exercise two (or one), and add a double-underscore method so that `print(repr(elsie))` prints: > Cow("Elsie", "Jersey") ### Student Solution ``` # Your code goes here ``` --- ## Exercise 4 Fix the Car class in the code inheritance below so that "Vroom!" is printed. 
### Student Solution

```
# Your code goes here
class Vehicle:
    # Instance methods must accept self as their first parameter.
    def go(self):
        pass


# Car must inherit from Vehicle so the isinstance() check below succeeds,
# and go() must accept self so it can be called on an instance.
class Car(Vehicle):
    def go(self):
        print("Vroom!")

# No changes below here!
car = Car()

if isinstance(car, Vehicle):
    car.go()
```

---
github_jupyter
``` import numpy as np import librosa import glob import os from random import randint import torch import torch.nn as nn from torch.utils import data import torch.optim as optim from torch.utils.data import DataLoader from torch.utils.data import sampler import matplotlib.pyplot as plt %matplotlib inline import torch.nn.functional as F import import_ipynb from sen_net import * from sen_dataloader import * torch.cuda.set_device(3) print(torch.cuda.current_device()) SENmodel = SEN_classify().double() state_dict = torch.load('../../model/baseline_sen.pkl') SENmodel.load_state_dict(state_dict) SENmodel = SENmodel.cuda() SENmodel = SENmodel.eval() result_score_dict = {} result = [] print(len(my_test_data)) with torch.no_grad(): for key,token in enumerate(my_test_data): x1_name,x2_name,exit_index = load_exits_file(my_test_data,key) X1 = torch.from_numpy(np.expand_dims(np.transpose(np.load(x1_name)),axis=2)) X2 = torch.from_numpy(np.expand_dims(np.transpose(np.load(x2_name)),axis=2)) X = torch.cat((X1,X2),dim=2) X = X.unsqueeze(0) X = X.unsqueeze(0) x1 =torch.transpose((X[...,0]),2,3).cuda() x2 =torch.transpose((X[...,1]),2,3).cuda() predict_label = SENmodel(x1,x2).detach().cpu() result_score_dict[token] = predict_label[0][0] print(len(result_score_dict)) from __future__ import division testLength = 1000 def get_result_key(index,x1,x2): return "song"+str(index)+"_"+str(x1)+","+"song"+str(index)+"_"+str(x2) def get_result_score(index): scoreList = [] score_123 = result_score_dict[get_result_key(index,1,2)] + result_score_dict[get_result_key(index,2,3)] score_132 = result_score_dict[get_result_key(index,1,3)] + result_score_dict[get_result_key(index,3,2)] score_213 = result_score_dict[get_result_key(index,2,1)] + result_score_dict[get_result_key(index,1,3)] score_231 = result_score_dict[get_result_key(index,2,3)] + result_score_dict[get_result_key(index,3,1)] score_312 = result_score_dict[get_result_key(index,3,1)] + result_score_dict[get_result_key(index,1,2)] score_321 = 
result_score_dict[get_result_key(index,3,2)] + result_score_dict[get_result_key(index,2,1)] scoreList.append(score_123) scoreList.append(score_132) scoreList.append(score_213) scoreList.append(score_231) scoreList.append(score_312) scoreList.append(score_321) import operator index, value = max(enumerate(scoreList), key=operator.itemgetter(1)) return index,value def cal_accuracy(index): if(index==0): return 2,True elif(index==1): return 0,False elif(index==2): return 0,False elif(index==3): return 1,False elif(index==4): return 1,False elif(index==5): return 0,False else: return -1,False GA = [0,0,0,0,0,0] PA = 0.0 for i in range(6000,7000): index, value = get_result_score(i) # print(index,value) pair,_ = cal_accuracy(index) PA += pair/3 if(index == 0): GA[0] += 1 if(index == 1): GA[1] += 1 if(index == 2): GA[2] += 1 if(index == 3): GA[3] += 1 if(index == 4): GA[4] += 1 if(index == 5): GA[5] += 1 #GA[0] is "123" #GA[1,2,3,4,5] is "132,213,231,312,321" print("Accuracy:",GA[0]/testLength) # for i in range(6): # print("Accuracy:",GA[i]/testLength) ```
github_jupyter
# Self-Driving Car Engineer Nanodegree ## Deep Learning ## Project: Build a Traffic Sign Recognition Classifier In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project. The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. 
--- ## Step 0: Load The Data ``` # Load pickled data import pickle import numpy as np # TODO: Fill this in based on where you saved the training and testing data training_file = "train.p" validation_file="valid.p" testing_file = "test.p" with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] ``` --- ## Step 1: Dataset Summary & Exploration The pickled data is a dictionary with 4 key/value pairs: - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels). - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id. - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image. - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES** Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas ``` ### Replace each question mark with the appropriate value. 
### Use python, pandas or numpy methods rather than hard coding the results # TODO: Number of training examples n_train = X_train.shape[0] print(n_train) # TODO: Number of validation examples n_validation = X_valid.shape[0] # TODO: Number of testing examples. n_test = X_test.shape[0] # TODO: What's the shape of an traffic sign image? image_shape = X_test.shape[1:3] # TODO: How many unique classes/labels there are in the dataset. n_classes = len(np.unique(y_train)) print("Number of training examples =", n_train) print("Number of validation examples", n_validation) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) ``` ### Include an exploratory visualization of the dataset Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python. **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others? ``` ### Data exploration visualization code goes here. ### Feel free to use as many code cells as needed. import matplotlib.pyplot as plt import matplotlib.image as mpimg # function to convert rgb to grayscale def rgb2gray(rgb): return np.expand_dims(np.dot(rgb[...,:3], [0.299, 0.587, 0.114]), axis = 3) # Visualizations will be shown in the notebook. 
%matplotlib inline fig=plt.figure(figsize=(8, 8)) columns = 4 rows = 5 for i in range(1, columns*rows +1): index = np.random.randint(n_train) img = X_train[index] fig.add_subplot(rows, columns, i) plt.imshow(img) plt.show() # Visulize number of class of train data hist, bins = np.histogram(y_train, bins = n_classes) center = 1 plt.bar(bins[1:], hist) plt.title("Histogram of training date") plt.show() ``` ---- ## Step 2: Design and Test a Model Architecture Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset). The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. There are various aspects to consider when thinking about this problem: - Neural network architecture (is the network over or underfitting?) - Play around preprocessing techniques (normalization, rgb to grayscale, etc) - Number of examples per label (some have more than others). - Generate fake data. Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. 
### Pre-process the Data Set (normalization, grayscale, etc.) Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. Other pre-processing steps are optional. You can try different techniques to see if it improves performance. Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. ``` ### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include ### converting to grayscale, etc. ### Feel free to use as many code cells as needed. print(X_train.shape) #Store original data X_train_original = X_train X_test_original = X_test X_valid_original = X_valid #Gray scale the image X_train_gray = rgb2gray(X_train) X_test_gray = rgb2gray(X_test) X_valid_gray = rgb2gray(X_valid) print("Mean before normalization:",np.mean(X_train_gray)) #Normalize the image X_train_norm = (X_train_gray - 128)/128 X_test_norm = (X_test_gray - 128)/128 X_valid_norm = (X_valid_gray - 128)/128 print("Shape of the gray scale image:",X_train_gray.shape) print("Mean after normalization",np.mean(X_train_gray)) #Data selection: select gray scale, normalized images, or original image for test X_train = X_train_norm - np.mean(X_train_norm) X_valid = X_valid_norm - np.mean(X_valid_norm) X_test = X_test_norm - np.mean(X_test_norm) print(np.mean(X_train)) ``` ### Model Architecture ``` ### Define your architecture here. ### Feel free to use as many code cells as needed. from sklearn.utils import shuffle import tensorflow as tf EPOCHS = 100 BATCH_SIZE = 128 from tensorflow.contrib.layers import flatten def LeNet(x): mu = 0 sigma = 0.1 #Layer 1: Convolutional. Input = 32x32x1. 
Output = 28x28x6 conv1_W = tf.Variable(tf.truncated_normal(shape = (5, 5, 1, 6), mean = mu, stddev = sigma)) conv1_b = tf.Variable(tf.zeros(6)) conv1 = tf.nn.conv2d(x, conv1_W, strides = [1, 1, 1, 1], padding='VALID') + conv1_b #Activation conv1 = tf.nn.relu(conv1) #Pooling. Input = 28x28x6 Output = 14x14x6 conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') #Layer 2: Convolutional. Output = 10x10x16 conv2_W = tf.Variable(tf.truncated_normal(shape = (5, 5, 6, 16), mean = mu, stddev = sigma)) conv2_b = tf.Variable(tf.zeros(16)) conv2 = tf.nn.conv2d(conv1, conv2_W, strides = [1, 1, 1, 1], padding='VALID') + conv2_b #Activation conv2 = tf.nn.relu(conv2) #Pooling. Output = 5x5x16 conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') #Flatten fc0 = flatten(conv2) #Layer 3: Fully Connected. Input = 400 Output = 120 fc1_W = tf.Variable(tf.truncated_normal(shape = (400, 120), mean = mu, stddev = sigma)) fc1_b = tf.Variable(tf.zeros(120)) fc1 = tf.matmul(fc0, fc1_W) + fc1_b #Activation fc1 = tf.nn.relu(fc1) #Dropout fc1 = tf.nn.dropout(fc1, 0.7) #Layer 4: Fully Connected. Input = 120. Output = 84 fc2_W = tf.Variable(tf.truncated_normal(shape = (120, 84), mean = mu, stddev = sigma)) fc2_b = tf.Variable(tf.zeros(84)) fc2 = tf.matmul(fc1, fc2_W) + fc2_b #Activation fc2 = tf.nn.relu(fc2) #Dropout fc2 = tf.nn.dropout(fc2, 0.7) #Layer 5: Fully Connected input = 84 output = 43 fc3_W = tf.Variable(tf.truncated_normal(shape = (84, 43), mean = mu, stddev = sigma)) fc3_b = tf.Variable(tf.zeros(43)) logits = tf.matmul(fc2, fc3_W) + fc3_b return logits ``` ### Train, Validate and Test the Model A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting. ``` ### Train your model here. 
### Calculate and report the accuracy on the training and validation set. ### Once a final model architecture is selected, ### the accuracy on the test set should be calculated and reported as well. ### Feel free to use as many code cells as needed. x = tf.placeholder(tf.float32, (None, 32, 32, 1)) y = tf.placeholder(tf.int32, (None)) one_hot_y = tf.one_hot(y, 43) rate = 0.001 logits = LeNet(x) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels = one_hot_y, logits = logits) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation) #Evaluation correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) def evaluation(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x:batch_x, y:batch_y}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples # Train the model print(X_train.shape) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y}) #print training accuracy training_accuracy = evaluation(X_train, y_train) print("EPOCH {} ...".format(i+1)) print("Training Accuracy = {:.3f}".format(training_accuracy)) #print validation accuracy validation_accuracy = evaluation(X_valid, y_valid) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) #Test the model test_accuracy 
= evaluation(X_test, y_test) print("Test accuracy is:", test_accuracy) print("Done") try: saver except NameError: saver = tf.train.Saver() saver.save(sess, './lenet') print("Model saved") ``` --- ## Step 3: Test a Model on New Images To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type. You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. ### Load and Output the Images ``` ### Load the images and plot them here. ### Feel free to use as many code cells as needed. import cv2 import numpy as np import matplotlib.pyplot as plt from os import listdir from os.path import isfile, join image_path = "./internet_images/" files = [f for f in listdir(image_path) if isfile(join(image_path,f))] total_image = len(files) my_image = [] for i in range(len(files)): cur_path = image_path + files[i] #load image as grayscale img = cv2.cvtColor(cv2.imread(cur_path), cv2.COLOR_BGR2RGB) img = rgb2gray(img) resize_img = cv2.resize(img,(32,32)) #preprocess resize_img = (resize_img -128)/128 resize_img = resize_img - np.mean(resize_img) my_image.append(resize_img) %matplotlib inline fig=plt.figure(figsize=(8, 8)) columns = total_image rows = 1 for i in range(1,columns + 1): img = my_image[i-1] fig.add_subplot(rows, columns, i) plt.imshow(img) plt.show() img_label = [11,25,18,4,40] ``` ### Predict the Sign Type for Each Image ``` ### Run the predictions here and use the model to output the prediction for each image. ### Make sure to pre-process the images with the same pre-processing pipeline used earlier. ### Feel free to use as many code cells as needed. 
my_image_exp = np.expand_dims(my_image, axis=3) print(my_image_exp.shape) softmax_logits = tf.nn.softmax(logits) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.import_meta_graph('./lenet.meta') saver.restore(sess, "./lenet") my_softmax_logits = sess.run(softmax_logits, feed_dict={x: my_image_exp}) pred_labels = [] for i in range(len(my_softmax_logits)): cur_max = max(my_softmax_logits[i]) max_index = [i for i, j in enumerate(my_softmax_logits[i]) if j == cur_max] pred_labels.append(max_index) print(pred_labels) ``` ### Analyze Performance ``` ### Calculate the accuracy for these 5 new images. ### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images. with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver2 = tf.train.import_meta_graph('./lenet.meta') saver2.restore(sess, "./lenet") test_accuracy = evaluation(my_image_exp, img_label) print("Test Set Accuracy = {:.3f}".format(test_accuracy)) ``` ### Output Top 5 Softmax Probabilities For Each Image Found on the Web For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image. `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids. Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. 
`tf.nn.top_k` is used to choose the three classes with the highest probability: ``` # (5, 6) array a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, 0.12789202], [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, 0.15899337], [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , 0.23892179], [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , 0.16505091], [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, 0.09155967]]) ``` Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces: ``` TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], [ 0.28086119, 0.27569815, 0.18063401], [ 0.26076848, 0.23892179, 0.23664738], [ 0.29198961, 0.26234032, 0.16505091], [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], [0, 1, 4], [0, 5, 1], [1, 3, 5], [1, 4, 3]], dtype=int32)) ``` Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. ``` ### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. ### Feel free to use as many code cells as needed. 
k_size = 5 softmax_logits = tf.nn.softmax(logits) top_k = tf.nn.top_k(softmax_logits, k=k_size) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) my_saver = tf.train.import_meta_graph('./lenet.meta') saver.restore(sess, "./lenet") my_softmax_logits = sess.run(softmax_logits, feed_dict={x: my_image_exp}) my_top_k = sess.run(top_k, feed_dict={x: my_image_exp}) print(my_top_k[1]) figures = [] labels = [] %matplotlib inline fig=plt.figure(figsize=(20, 20)) columns = k_size + 1 rows = total_image for r in range(1, rows + 1): for i in range(1,columns + 1): if i == 1: img = my_image[r-1] else: img = X_test[np.argwhere(y_test == my_top_k[1][r-1][i-2])[0]].squeeze() ax = fig.add_subplot(rows, columns, (r-1)*columns + i) if i == 1: ax.title.set_text('Original') else: title_str = "Probability=" + str(int(100*my_top_k[0][r-1][i-2])) ax.title.set_text(title_str) plt.imshow(img) plt.show() ``` ### Project Writeup Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n", "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. --- ## Step 4 (Optional): Visualize the Neural Network's State with Test Images This Section is not required to complete but acts as an additional excersise for understaning the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. 
After successfully training your neural network you can see what it's feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol. Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable. For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image. 
<figure> <img src="visualize_cnn.png" width="380" alt="Combined Image" /> <figcaption> <p></p> <p style="text-align: center;"> Your output should look something like this (above)</p> </figcaption> </figure> <p></p> ``` ### Visualize your network's feature maps here. ### Feel free to use as many code cells as needed. # image_input: the test image being fed into the network to produce the feature maps # tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer # activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output # plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1): # Here make sure to preprocess your image_input in a way your network expects # with size, normalization, ect if needed # image_input = # Note: x should be the same name as your network's tensorflow data placeholder variable # If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function activation = tf_activation.eval(session=sess,feed_dict={x : image_input}) featuremaps = activation.shape[3] plt.figure(plt_num, figsize=(15,15)) for featuremap in range(featuremaps): plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number if activation_min != -1 & activation_max != -1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray") elif activation_max != -1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray") elif activation_min !=-1: 
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray") else: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray") ```
github_jupyter
<img src="https://raw.githubusercontent.com/Qiskit/qiskit-tutorials/master/images/qiskit-heading.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # _*Exercises*_ The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial. This notebook is aimed to help you understand the content of notebooks in this folder by working on the problems listed. The content of this exercise is based on the notebook [here](https://github.com/Qiskit/qiskit-tutorials/blob/master/qiskit/terra/quantum_circuits.ipynb). You'll need to run the cells to use this tutorial. To run a cell, do the following. * For laptops and desktops, click on the cell and press **Shift-Enter**. * For mobile devices, tap on the icon that appears to the left of a cell. Get started by doing this for the cell below (it will take a second or two to run). ``` #importing array and useful math functions import numpy as np #importing circuits and registers from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister #importing backends and running environment from qiskit import BasicAer, execute ``` ## Problem 1: Playing with Circuits Q: Prepare two quantum registers each of which has size 2 qubits, and a quantum register consisting of 4 bits. Q: Create a circuit using the above registers. Q: Apply NOT gate and the Hadamard gate to the the first qubit of the first register. Then, apply the controlled NOT with the first qubit as control on the first register. Apply the Hadamard gate to the second qubit of the second register. Then, apply the controlled NOT with the second qubit as control on the second register. Q: Draw the circuit. Q: Create another circuit consisting of the same register as the first circuit. Apply the controlled NOT gate with the second qubit of the first register as control and the first qubit of the second register as target to the circuit. And, draw the circuit. 
Q: What are the numbers of tensor factors of the first and second circuits? Q: Create another circuit by appending the second circuit to the first circuit. Q: Create a measurement circuit that store the result of measuring quantum registers to the classical register. Q: Add the measurement circuit to the combined quantum circuit by first applying barrier. Q: What is the number of tensor factor of the final circuit? Q: Run the circuit on the local simulator backend. ## Problem 2: Circuit Equivalence Q: Create a quantum circuit that consists of a two-qubit quantum register and a two-bit classical register. ### Swap gate Q: Add three controlled NOT gates to the circuit as follows: apply the controlled NOT with the first qubit as control, and then the controlled NOT with the second qubit as control, and the controlled NOT with the first qubit as control. Q: Show that the above circuit swaps the first qubit to the second qubit. (*HINT: Straightforwardly by checking all possible classical inputs (bases) and seeing the outputs*) Q: Create another circuit that consists of three controlled NOT gates on the same registers as the first circuit as follows: apply the controlled NOT with the second qubit as control, and then the controlled NOT with the first qubit as control, and the controlled NOT with the second qubit as control. Q: Show that the above circuit is equal to the first swap circuit. (*HINT: append the second circuit to the first circuit and run the resulting circuit on all possible input bits*) ## Problem 3: Equivalence up to the Global Phase Q: Create a quantum circuit which consist of a two-qubit quantum register and a two-bit classical register. Q: Using the above circuit, show that the X (or, NOT) gate is equal to the $u3$ gate with parameters $(\pi, 0, \pi)$. Those gates are equivalence up to the global phase. Q: Using the above circuit, show that the controlled NOT gate is not equal to the controlled-$u3(\pi,0,\pi)$ gate. 
Notice that even though two single-qubit gates are equivalent up to a global phase, their controlled gates can be different.

# Contributing

We welcome feedback and new problems to be added to this notebook.
github_jupyter
# LassoLars with Quantile Transformer This Code template is for the regression analysis using a simple LassoLars Regression with Feature Transformation technique QuantileTransformer in a pipeline. It is a lasso model implemented using the LARS algorithm. ### Required Packages ``` import warnings import numpy as np import pandas as pd import seaborn as se import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import QuantileTransformer from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error from sklearn.linear_model import LassoLars warnings.filterwarnings('ignore') ``` ### Initialization Filepath of CSV file ``` #filepath file_path= "" ``` List of features which are required for model training . ``` #x_values features=[] ``` Target feature for prediction. ``` #y_value target='' ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and target/outcome to Y. ``` X=df[features] Y=df[target] ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. 
And convert the string classes data in the datasets by encoding them to integer classes. ``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ``` Calling preprocessing functions on the feature and target set. ``` x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ``` x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123) ``` ### Feature Transformation Quantile Transformer This method transforms the features to follow a uniform or a normal distribution. Therefore, for a given feature, this transformation tends to spread out the most frequent values. It also reduces the impact of (marginal) outliers: this is therefore a robust preprocessing scheme. Transform features using quantiles information. ### Model LassoLars is a lasso model implemented using the LARS algorithm, and unlike the implementation based on coordinate descent, this yields the exact solution, which is piecewise linear as a function of the norm of its coefficients. 
### Tuning parameters > **fit_intercept** -> whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations > **alpha** -> Constant that multiplies the penalty term. Defaults to 1.0. alpha = 0 is equivalent to an ordinary least square, solved by LinearRegression. For numerical reasons, using alpha = 0 with the LassoLars object is not advised and you should prefer the LinearRegression object. > **eps** -> The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. > **max_iter** -> Maximum number of iterations to perform. > **positive** -> Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (alphas_[alphas_ > 0.].min() when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. > **precompute** -> Whether to use a precomputed Gram matrix to speed up calculations. ``` model = make_pipeline(QuantileTransformer(),LassoLars()) model.fit(x_train,y_train) ``` #### Model Accuracy We will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model. score: The score function returns the coefficient of determination R2 of the prediction. ``` print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) ``` > **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. 
> **mae**: The **mean absolute error** function calculates the total amount of error (the average absolute distance between the real data and the predicted data) made by our model.

> **mse**: The **mean squared error** function squares the errors (penalizing the model for large errors) made by our model.

```
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```

#### Prediction Plot

First, we plot the first 20 actual test observations in green, with the record number on the x-axis and the target value on the y-axis. Then we plot the model's predictions for the same 20 test observations in red, so the two curves can be compared record by record.

```
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```

#### Creator: Ageer Harikrishna , Github: [Profile](https://github.com/ageerHarikrishna)
github_jupyter
This notebook contains a short guide on using the solvers w/o backprop or computing gradients. Some issues of interest include: 1. How to define and solve SDEs with this codebase 1. How to run things on a GPU 1. How to gain control over the randomness and enforce deterministic behavior with fixed seeds (e.g. when testing) 1. The subtlety of noise type in SDEs The other file in the `examples` folder (`latent_sde.py`) contains the use case where gradients need to be taken to fit parameters. ``` import torch from torch import nn import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) %matplotlib inline import matplotlib.pyplot as plt from torchsde import sdeint, BrownianPath, BrownianTree ``` Just like how each ordinary differential equation (ODE) is governed by a vector field, a stochastic differential equation (SDE) is governed by two vector fields, which are called the **drift** and **diffusion** functions: $$dx(t) = \underbrace{f(x(t), t, \theta_f)}_{\text{drift}} dt + \underbrace{g(x(t), t, \theta_g)}_{\text{diffusion}} dW(t).$$ The output of $f$ is of the same size as the $d$-dimensional state, whereas the output of $g$ may be a matrix of size $(d, m)$. Here, $W(t)$ is the Brownian motion (aka Wiener process), and it may be $m$ dimensional. It is a stochastic process, and each random draw produces a function of time. ### 1. Solving a simple SDE To implement an SDE, we create a class with the functions `f` and `g`: ``` class SDE(nn.Module): def __init__(self): super().__init__() self.theta = nn.Parameter(torch.tensor(0.1), requires_grad=False) # Scalar parameter. self.noise_type = "diagonal" self.sde_type = "ito" def f(self, t, y): return torch.sin(t) + self.theta * y def g(self, t, y): return 0.3 * torch.sigmoid(torch.cos(t) * torch.exp(-y)) ``` The functions `f` and `g` are arbitrarily chosen for demonstration purposes. 
The attributes `noise_type` and `sde_type` are used in the solver to determine the particular numerical method being used and must be included. We use `diagonal` here, meaning the output of `g` should be a vector with the same shape as input `y`, and it is an element-wise function. Note that for any other noise type, we expect the output of `g` to be a matrix, and batch matrix-vector product is performed under the hood. The requirement of element-wise function is a rather technical condition to ensure the high-order solvers attain their theoretically derived efficiency. All solvers in the codebase are based on [Itô stochastic integrals](https://en.wikipedia.org/wiki/It%C3%B4_calculus), so we use `ito` for the `sde_type` attribute. The library also has a base class `SDEIto`, which can be inherited from and imported directly, and saves the extra line of setting the `sde_type` attribute. As a side note, our adjoint computation internally computes a Stratonovich correction term and performs the reverse pass with it. We plan to add solvers based on stratonovich SDEs in the future. Now we instantiate an object of the SDE class and call the function `sdeint` on it. ``` batch_size, d, T = 3, 1, 100 sde = SDE() ts = torch.linspace(0, 1, T) y0 = torch.zeros(batch_size, 1).fill_(0.1) # (batch_size, d) with torch.no_grad(): ys = sdeint(sde, y0, ts, method='srk') # (T, batch_size, d) = (100, 3, 1). plt.figure() for i in range(batch_size): plt.plot(ts, ys[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$y_t$') plt.legend() plt.show() ``` `method='srk'` means the strong order 1.5 Stochastic Runge-Kutta (SRK) method is used. Other possible methods include the strong order 0.5 `euler` and strong order 1.0 `milstein`, both of which are of slightly lower order. We stress that the drift and diffusion functions don't necessarily need to be defined as the `f` and `g` methods of the class. 
They can be methods with any name, so long as we provide these names to the solver when they differ from the default `f` and `g`. The following is an example where the function `h` is used as the drift. ``` class SDENewName(nn.Module): def __init__(self): super().__init__() self.theta = nn.Parameter(torch.tensor(0.1), requires_grad=False) # Scalar parameter. self.noise_type = "diagonal" self.sde_type = "ito" def h(self, t, y): return torch.sin(t) + self.theta * y def g(self, t, y): return 0.3 * torch.sigmoid(torch.cos(t) * torch.exp(-y)) sde_new_name = SDENewName() with torch.no_grad(): ys = sdeint(sde_new_name, y0, ts, method='srk', names={'drift': 'h'}) # Supply a dictionary to the argument `names`. plt.figure() for i in range(batch_size): plt.plot(ts, ys[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$y_t$') plt.legend() plt.show() ``` ### 2. Moving to GPUs Trivially, the previous code may be adapted to run on GPUs, just by moving all tensors to a GPU: ``` if torch.cuda.is_available(): gpu = torch.device('cuda') sde = SDE().to(gpu) ts = ts.to(gpu) y0 = y0.to(gpu) with torch.no_grad(): ys = sdeint(sde, y0, ts, method='srk') # (100, 3, 1). plt.figure() for i in range(batch_size): plt.plot(ts.cpu(), ys[:, i].squeeze().cpu(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$y_t$') plt.legend() plt.show() ``` A side note is that multi-GPU data parallel is possible with the existing codebase, but the use case has not been tried out extensively and may require defining non-standard SDE classes and methods. ### 3. Explicit control over randomness from the Brownian motion To gain control over the randomness, we draw Brownian motion samples by instantiating objects of classes `BrownianPath` or `BrownianTree`. `BrownianPath` has fast query, but stores all previous queries, so is costly in memory. 
`BrownianTree` only stores objects in a fixed size cache, but has slower query, since everything else is reconstructed on the fly based on random seed splitting. Repeated queries on the same Brownian motion object gives deterministic results. Here, we use `BrownianPath` as an example. ``` ts = torch.linspace(0, 1, T) bm = BrownianPath(t0=0.0, w0=torch.zeros(batch_size, d)) bm_queries = torch.stack([bm(t) for t in ts], dim=0) plt.figure() plt.title('Query') for i in range(batch_size): plt.plot(ts, bm_queries[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$W_t$') plt.legend() plt.show() bm_queries2 = torch.stack([bm(t) for t in ts], dim=0) plt.figure() plt.title('Query again (samples should be same as before)') for i in range(batch_size): plt.plot(ts, bm_queries2[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$W_t$') plt.legend() plt.show() assert torch.allclose(bm_queries, bm_queries2) ``` In our experience, having the Brownian motion run on CPUs is usually slightly faster than having it run on GPUs (though, generally, this obviously depends on the specific hardware, software, and program). When the latter is necessary, we can achieve this by either putting `w0` on the GPU or using the `to` method of the `bm` object: ``` if torch.cuda.is_available(): # Approach 1: bm = BrownianPath(t0=0.0, w0=torch.zeros(batch_size, d).to(gpu)) # Runs on GPU. print(bm(0.5)) # Approach 2: bm = BrownianPath(t0=0.0, w0=torch.zeros(batch_size, d)) # Runs on CPU. bm.to(gpu) # Runs on GPU. 
print(bm(0.5)) ``` We can also feed this fixed Brownian motion sample into the solver to get deterministic behavior: ``` sde = SDE() ts = torch.linspace(0, 1, T) y0 = torch.zeros(batch_size, 1).fill_(0.1) # (batch_size, d) with torch.no_grad(): ys = sdeint(sde, y0, ts, method='srk', bm=bm) plt.figure() plt.title('Solve SDE') for i in range(batch_size): plt.plot(ts, ys[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$y_t$') plt.legend() plt.show() with torch.no_grad(): ys = sdeint(sde, y0, ts, method='srk', bm=bm) plt.figure() plt.title('Solve SDE again (samples should be same as before)') for i in range(batch_size): plt.plot(ts, ys[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$y_t$') plt.legend() plt.show() # Use a new BM sample, we expect different sample paths. bm = BrownianPath(t0=0.0, w0=torch.zeros(batch_size, d)) with torch.no_grad(): ys = sdeint(sde, y0, ts, method='srk', bm=bm) plt.figure() plt.title('Solve SDE (expect different sample paths)') for i in range(batch_size): plt.plot(ts, ys[:, i].squeeze(), marker='x', label=f'sample {i}') plt.xlabel('$t$') plt.ylabel('$y_t$') plt.legend() plt.show() ``` ### 4. Noise type of SDEs affects which solvers can be used and what strong orders can be attained The supported noise types of this codebase are "diagonal", "additive", "scalar", and "general". The following is a simple summary of each type: - "diagonal": The diffusion function is an elementwise function, with the output being the same dimension as the state (both $d$-dimensional). There are $d$ independent Brownian motions, each responsible for the noise of only a single state dimension. - "additive": The diffusion function is a constant w.r.t. the state, i.e. the derivative of the diffusion function w.r.t. the state is 0. The output of the diffusion function is of size $(d, m)$, and the system has $m$ independent Brownian motions. 
The integral involving the Brownian motion can be loosely interpreted as integrating a sequence of matrix-vector products.
- "scalar": The diffusion function has output shape $(d, 1)$, and a single Brownian motion is shared across all state dimensions.
- "general": The diffusion function has output shape $(d, m)$, and the system has $m$ independent Brownian motions.

It is tempting to use the noise type configuration "general" for all problems. However, since there's little known structure for these SDEs, high-order solvers are not possible, and the current codebase only supports the `euler` method. All three methods (`euler`, `milstein`, and `srk`) are supported for all remaining noise types.

Lastly, for modeling problems, our limited experience has found "diagonal" to be a good setting, where flexibility of models and tractability of numerical integration is rather well-balanced.
github_jupyter
# Batch Scoring on IBM Cloud Pak for Data (ICP4D) We are going to use this notebook to create and/or run a batch scoring job against a model that has previously been created and deployed to the Watson Machine Learning (WML) instance on Cloud Pak for Data (CP4D). ## 1.0 Install required packages There are a couple of Python packages we will use in this notebook. First we make sure the Watson Machine Learning client v3 is removed (its not installed by default) and then install/upgrade the v4 version of the client (this package is installed by default on CP4D). - WML Client: https://wml-api-pyclient-dev-v4.mybluemix.net/#repository ``` !pip uninstall watson-machine-learning-client -y !pip install --user watson-machine-learning-client-v4==1.0.99 --upgrade | tail -n 1 import json from watson_machine_learning_client import WatsonMachineLearningAPIClient ``` ## 2.0 Create Batch Deployment Job ### 2.1 Instantiate Watson Machine Learning Client To interact with the local Watson Machine Learning instance, we will be using the Python SDK. <font color=red>**<< UPDATE THE VARIABLES BELOW >>**</font> <font color='red'>Replace the `username` and `password` values of `************` with your Cloud Pak for Data `username` and `password`. The value for `url` should match the `url` for your Cloud Pak for Data cluster, which you can get from the browser address bar (be sure to include the 'https://'.</font> The credentials should look something like this (these are example values, not the ones you will use): ` wml_credentials = { "url": "https://zen.clusterid.us-south.containers.appdomain.cloud", "username": "cp4duser", "password" : "cp4dpass", "instance_id": "wml_local", "version" : "2.5.0" } ` #### NOTE: Make sure that there is no trailing forward slash `/` in the `url` ``` # Be sure to update these credentials before running the cell. 
wml_credentials = { "url": "******", "username": "******", "password" : "*****", "instance_id": "wml_local", "version" : "2.5.0" } wml_client = WatsonMachineLearningAPIClient(wml_credentials) wml_client.spaces.list() ``` ### 2.2 Find Deployment Space We will try to find the `GUID` for the deployment space you want to use and set it as the default space for the client. <font color=red>**<< UPDATE THE VARIABLES BELOW >>**</font> - Update with the value with the name of the deployment space where you have created the batch deployment (one of the values in the output from the cell above). ``` # Be sure to update the name of the space with the one you want to use. DEPLOYMENT_SPACE_NAME = 'INSERT-YOUR-DEPLOYMENT-SPACE-NAME-HERE' all_spaces = wml_client.spaces.get_details()['resources'] space_id = None for space in all_spaces: if space['entity']['name'] == DEPLOYMENT_SPACE_NAME: space_id = space["metadata"]["guid"] print("\nDeployment Space GUID: ", space_id) if space_id is None: print("WARNING: Your space does not exist. Create a deployment space before proceeding.") # We could programmatically create the space. #space_id = wml_client.spaces.store(meta_props={wml_client.spaces.ConfigurationMetaNames.NAME: space_name})["metadata"]["guid"] # Now set the default space to the GUID for your deployment space. If this is successful, you will see a 'SUCCESS' message. wml_client.set.default_space(space_id) # These are the models and deployments we currently have in our deployment space. wml_client.repository.list_models() wml_client.deployments.list() ``` ### 2.3 Find Batch Deployment We will try to find the batch deployment which was created. <font color=red>**<< UPDATE THE VARIABLES BELOW >>**</font> - Update with the name of the batch deployment. 
``` DEPLOYMENT_NAME = 'INSERT-YOUR-BATCH-DEPLOYMENT-NAME-HERE' wml_deployments = wml_client.deployments.get_details() deployment_uid = None deployment_details = None for deployment in wml_deployments['resources']: if DEPLOYMENT_NAME == deployment['entity']['name']: deployment_uid = deployment['metadata']['guid'] deployment_details = deployment #print(json.dumps(deployment_details, indent=3)) break print("Deployment id: {}".format(deployment_uid)) wml_client.deployments.get_details(deployment_uid) ``` ### 2.4 Get Batch Test Data We will load some data to run the batch predictions. ``` import pandas as pd from project_lib import Project project = Project.access() batch_set = pd.read_csv(project.get_file('Telco-Customer-Churn-SmallBatchSet.csv')) batch_set = batch_set.drop('customerID', axis=1) batch_set.head() ``` ### 2.5 Create Job We can now use the information about the deployment and the test data to create a new job against our batch deployment. We submit the data as inline payload and want the results (i.e predictions) stored in a CSV file. ``` import time timestr = time.strftime("%Y%m%d_%H%M%S") job_payload = { wml_client.deployments.ScoringMetaNames.INPUT_DATA: [{ 'fields': batch_set.columns.values.tolist(), 'values': batch_set.values.tolist() }], wml_client.deployments.ScoringMetaNames.OUTPUT_DATA_REFERENCE: { "type": "data_asset", "connection": {}, "location": { "name": "batchres_{}_{}.csv".format(timestr,deployment_uid), "description": "results" } } } job = wml_client.deployments.create_job(deployment_id=deployment_uid, meta_props=job_payload) job_uid = wml_client.deployments.get_job_uid(job) print('Job uid = {}'.format(job_uid)) wml_client.deployments.list_jobs() ``` ## 3.0 Monitor Batch Job Status The batch job is an async operation. We can use the identifier to track its progress. Below we will just poll until the job completes (or fails). 
``` def poll_async_job(client, job_uid): import time while True: job_status = client.deployments.get_job_status(job_uid) print(job_status) state = job_status['state'] if state == 'completed' or 'fail' in state: return client.deployments.get_job_details(job_uid) time.sleep(5) job_details = poll_async_job(wml_client, job_uid) wml_client.deployments.list_jobs() ``` ### 3.1 Check Results With the job complete, we can see the predictions. ``` wml_client.deployments.get_job_details() print(json.dumps(job_details, indent=2)) ``` ## Congratulations, you have created and submitted a job for batch scoring !
github_jupyter
<a href="https://www.pieriandata.com"><img src="../Pierian_Data_Logo.PNG"></a> <strong><center>Copyright by Pierian Data Inc.</center></strong> <strong><center>Created by Jose Marcial Portilla.</center></strong> # Convolutional Neural Networks for Image Classification ``` import pandas as pd import numpy as np from tensorflow.keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() ``` ## Visualizing the Image Data ``` import matplotlib.pyplot as plt %matplotlib inline x_train.shape single_image = x_train[0] single_image single_image.shape plt.imshow(single_image) ``` # PreProcessing Data We first need to make sure the labels will be understandable by our CNN. ## Labels ``` y_train y_test ``` Hmmm, looks like our labels are literally categories of numbers. We need to translate this to be "one hot encoded" so our CNN can understand, otherwise it will think this is some sort of regression problem on a continuous axis. Luckily , Keras has an easy to use function for this: ``` from tensorflow.keras.utils import to_categorical y_train.shape y_example = to_categorical(y_train) y_example y_example.shape y_example[0] y_cat_test = to_categorical(y_test,10) y_cat_train = to_categorical(y_train,10) ``` ### Processing X Data We should normalize the X data ``` single_image.max() single_image.min() x_train = x_train/255 x_test = x_test/255 scaled_single = x_train[0] scaled_single.max() plt.imshow(scaled_single) ``` ## Reshaping the Data Right now our data is 60,000 images stored in 28 by 28 pixel array formation. This is correct for a CNN, but we need to add one more dimension to show we're dealing with 1 RGB channel (since technically the images are in black and white, only showing values from 0-255 on a single channel), an color image would have 3 dimensions. 
``` x_train.shape x_test.shape ``` Reshape to include channel dimension (in this case, 1 channel) ``` x_train = x_train.reshape(60000, 28, 28, 1) x_train.shape x_test = x_test.reshape(10000,28,28,1) x_test.shape ``` # Training the Model ``` from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten model = Sequential() # CONVOLUTIONAL LAYER model.add(Conv2D(filters=32, kernel_size=(4,4),input_shape=(28, 28, 1), activation='relu',)) # POOLING LAYER model.add(MaxPool2D(pool_size=(2, 2))) # FLATTEN IMAGES FROM 28 by 28 to 764 BEFORE FINAL LAYER model.add(Flatten()) # 128 NEURONS IN DENSE HIDDEN LAYER (YOU CAN CHANGE THIS NUMBER OF NEURONS) model.add(Dense(128, activation='relu')) # LAST LAYER IS THE CLASSIFIER, THUS 10 POSSIBLE CLASSES model.add(Dense(10, activation='softmax')) # https://keras.io/metrics/ model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # we can add in additional metrics https://keras.io/metrics/ model.summary() from tensorflow.keras.callbacks import EarlyStopping early_stop = EarlyStopping(monitor='val_loss',patience=2) ``` ## Train the Model ``` model.fit(x_train,y_cat_train,epochs=10,validation_data=(x_test,y_cat_test),callbacks=[early_stop]) ``` ## Evaluate the Model ``` model.metrics_names losses = pd.DataFrame(model.history.history) losses.head() losses[['accuracy','val_accuracy']].plot() losses[['loss','val_loss']].plot() print(model.metrics_names) print(model.evaluate(x_test,y_cat_test,verbose=0)) from sklearn.metrics import classification_report,confusion_matrix predictions = model.predict_classes(x_test) y_cat_test.shape y_cat_test[0] predictions[0] y_test print(classification_report(y_test,predictions)) confusion_matrix(y_test,predictions) import seaborn as sns plt.figure(figsize=(10,6)) sns.heatmap(confusion_matrix(y_test,predictions),annot=True) # https://github.com/matplotlib/matplotlib/issues/14751 ``` # Predicting a given image ``` my_number 
= x_test[0] plt.imshow(my_number.reshape(28,28)) # SHAPE --> (num_images,width,height,color_channels) model.predict_classes(my_number.reshape(1,28,28,1)) ``` Looks like the CNN performed quite well!
github_jupyter
``` import os import json import random import re ## S3 Access import boto3 from sagemaker import get_execution_role role = get_execution_role() bucket='devopstar' data_key = 'resources/fbmsg-analysis-gpt-2/facebook.zip' s3 = boto3.resource('s3') with open('facebook.zip', 'wb') as data: s3.Bucket(bucket).download_fileobj(data_key, data) !unzip facebook.zip ## Download Dependencies !pip install --upgrade pip !pip install -r requirements.txt ## Download Model !sh download_model.sh 117M ## Get List of files files = [] for p, d, f in os.walk('messages/inbox'): for file in f: if file.endswith('message.json'): files.append(f'{p}/{file}') len(files) ## Helper Functions def fix_encoding(s): return re.sub('[\xc2-\xf4][\x80-\xbf]+',lambda m: m.group(0).encode('latin1').decode('utf8'),s) def find_cyrilic(s): return len(re.findall('(?i)[А-ЯЁ]', s)) > 0 def test_mostly_cyrilic(messages): i = 0 check_n = min(250, len(messages)) for msg in random.sample(messages, check_n): try: i +=find_cyrilic(fix_encoding(msg['content'])) or find_cyrilic(fix_encoding(msg['sender_name'])) except KeyError: check_n -=1 return i > check_n/5 ## Load Messages ### All Names def create_file(files=files): text_corpus = '' banned_names = () for file in files: with open(file, 'r') as f: try: msgs = json.load(f)['messages'] msgs.reverse() except: pass else: if not test_mostly_cyrilic(msgs) and not any(bn in file for bn in banned_names): for msg in msgs: try: content = fix_encoding(msg['content']) to_add = f"({msg['timestamp_ms']}) {msg['sender_name']}: {content}\n" if not find_cyrilic(to_add): text_corpus += to_add except KeyError: pass print(file) text_corpus += '\n\n' with open('fb-cleaned.txt', 'w') as f: f.write(text_corpus) ### Specify Particular Person def create_specific_file(person, files=files): text_corpus = '' for file in files: if person in file: print(file) with open(file, 'r') as f: try: msgs = json.load(f)['messages'] msgs.reverse() except: pass else: for msg in msgs: try: content = 
fix_encoding(msg['content']) to_add = f"({msg['timestamp_ms']}) {msg['sender_name']}: {content}\n" if not find_cyrilic(to_add): text_corpus += to_add except KeyError: pass text_corpus += '\n\n' with open(f'fb-cleaned-{person}.txt', 'w') as f: f.write(text_corpus) return ### Run create_file(files) ## Train !PYTHONPATH=src ./encode.py --in-text fb-cleaned.txt --out-npz fb-cleaned.txt.npz !PYTHONPATH=src ./train.py --dataset fb-cleaned.txt.npz --sample_every=250 --learning_rate=0.0001 --stop_after=251 ## Run mv checkpoint/run1/* models/117M/ !python3 src/generate_unconditional_samples.py --top_k 40 --temperature 0.9 ```
github_jupyter
# Augmentations in NLP Data Augmentation techniques in NLP show substantial improvements on datasets with less than 500 observations, as illustrated by the original paper. https://arxiv.org/abs/1901.11196 The Paper Considered here is EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks ``` # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session ``` # ***Simple Data Augmentatons Techniques* are:** 1. SR : Synonym Replacement 2. RD : Random Deletion 3. RS : Random Swap 4. RI : Random Insertion ``` data = pd.read_csv('../input/tweet-sentiment-extraction/train.csv') data.head() list_to_drop = ['textID','selected_text','sentiment'] data.drop(list_to_drop,axis=1,inplace=True) data.head() print(f"Total number of examples to be used is : {len(data)}") ``` # 1. 
Synonym Replacement : Synonym replacement is a technique in which we replace a word by one of its synonyms For identifying relevent Synonyms we use WordNet The get_synonyms funtion will return pre-processed list of synonyms of given word Now we will replace the words with synonyms ``` from nltk.corpus import stopwords stop_words = [] for w in stopwords.words('english'): stop_words.append(w) print(stop_words) import random from nltk.corpus import wordnet def get_synonyms(word): synonyms = set() for syn in wordnet.synsets(word): for l in syn.lemmas(): synonym = l.name().replace("_", " ").replace("-", " ").lower() synonym = "".join([char for char in synonym if char in ' qwertyuiopasdfghjklzxcvbnm']) synonyms.add(synonym) if word in synonyms: synonyms.remove(word) return list(synonyms) def synonym_replacement(words, n): words = words.split() new_words = words.copy() random_word_list = list(set([word for word in words if word not in stop_words])) random.shuffle(random_word_list) num_replaced = 0 for random_word in random_word_list: synonyms = get_synonyms(random_word) if len(synonyms) >= 1: synonym = random.choice(list(synonyms)) new_words = [synonym if word == random_word else word for word in new_words] num_replaced += 1 if num_replaced >= n: #only replace up to n words break sentence = ' '.join(new_words) return sentence print(f" Example of Synonym Replacement: {synonym_replacement('The quick brown fox jumps over the lazy dog',4)}") ``` To Get Larger Diversity of Sentences we could try replacing 1,2 3, .. Words in the given sentence. 
Now lets get an example from out dataset and try augmenting it so that we could create 3 additional sentences per tweet ``` trial_sent = data['text'][25] print(trial_sent) # Create 3 Augmented Sentences per data for n in range(3): print(f" Example of Synonym Replacement: {synonym_replacement(trial_sent,n)}") ``` Now we are able to augment this Data :) You can create New colums for the Same text-id in our tweet - sentiment Dataset # 2.Random Deletion (RD) In Random Deletion, we randomly delete a word if a uniformly generated number between 0 and 1 is smaller than a pre-defined threshold. This allows for a random deletion of some words of the sentence. ``` def random_deletion(words, p): words = words.split() #obviously, if there's only one word, don't delete it if len(words) == 1: return words #randomly delete words with probability p new_words = [] for word in words: r = random.uniform(0, 1) if r > p: new_words.append(word) #if you end up deleting all words, just return a random word if len(new_words) == 0: rand_int = random.randint(0, len(words)-1) return [words[rand_int]] sentence = ' '.join(new_words) return sentence ``` Lets test out this Augmentation with our test_sample ``` print(random_deletion(trial_sent,0.2)) print(random_deletion(trial_sent,0.3)) print(random_deletion(trial_sent,0.4)) ``` This Could help us in reducing Overfitting and may help to imporve our Model Accuracy # 3. Random Swap (RS) In Random Swap, we randomly swap the order of two words in a sentence. 
``` def swap_word(new_words): random_idx_1 = random.randint(0, len(new_words)-1) random_idx_2 = random_idx_1 counter = 0 while random_idx_2 == random_idx_1: random_idx_2 = random.randint(0, len(new_words)-1) counter += 1 if counter > 3: return new_words new_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1] return new_words def random_swap(words, n): words = words.split() new_words = words.copy() # n is the number of words to be swapped for _ in range(n): new_words = swap_word(new_words) sentence = ' '.join(new_words) return sentence print(random_swap(trial_sent,1)) print(random_swap(trial_sent,2)) print(random_swap(trial_sent,3)) ``` This Random Swapping will help to make our models robust and may inturn help in text classification. High order of swapping may downgrade the model There is a high chance to loose semantics of language so be careful while using this augmentaion. # 4. Random Insertion (RI) Finally, in Random Insertion, we randomly insert synonyms of a word at a random position. Data augmentation operations should not change the true label of a sentence, as that would introduce unnecessary noise into the data. Inserting a synonym of a word in a sentence, opposed to a random word, is more likely to be relevant to the context and retain the original label of the sentence. 
``` def random_insertion(words, n): words = words.split() new_words = words.copy() for _ in range(n): add_word(new_words) sentence = ' '.join(new_words) return sentence def add_word(new_words): synonyms = [] counter = 0 while len(synonyms) < 1: random_word = new_words[random.randint(0, len(new_words)-1)] synonyms = get_synonyms(random_word) counter += 1 if counter >= 10: return random_synonym = synonyms[0] random_idx = random.randint(0, len(new_words)-1) new_words.insert(random_idx, random_synonym) print(random_insertion(trial_sent,1)) print(random_insertion(trial_sent,2)) print(random_insertion(trial_sent,3)) def aug(sent,n,p): print(f" Original Sentence : {sent}") print(f" SR Augmented Sentence : {synonym_replacement(sent,n)}") print(f" RD Augmented Sentence : {random_deletion(sent,p)}") print(f" RS Augmented Sentence : {random_swap(sent,n)}") print(f" RI Augmented Sentence : {random_insertion(sent,n)}") aug(trial_sent,4,0.3) ```
github_jupyter
# Variational Quantum Regression $$ \newcommand{\ket}[1]{\left|{#1}\right\rangle} \newcommand{\bra}[1]{\left\langle{#1}\right|} \newcommand{\braket}[2]{\left\langle{#1}\middle|{#2}\right\rangle} $$ ## Introduction Here we create a protocol for linear regression which can exploit the properties of a quantum computer. For this problem, we assume that we have two data sets, x and y, where x is the independent data and y is the dependent data. There are N data points in each data set. We first want to fit this data to the following equation: $$y = ax + b$$ and then we will include higher powers of x. First, we will theoretically explore this proposed algorithm, and then we will tweak the code slightly so that it can be run on a real quantum computer. This algorithm has no known advantage over the most widely-used classical algorithm ([Least Squares Method](https://doi.org/10.1016/j.proeng.2012.09.545)), but does nicely demonstrate the different elements of variational quantum algorithms. ## Variational Quantum Computing Variational quantum computing exploits the advantages of both classical computing and quantum computing. In a very general sense, we propose an initial solution to a problem, called an ansatz. In our case our ansatz will be an ansatz parametrised by a and b. We then prepare our qubits (the quantum equivalent of bits on a normal computer) and test how good the ansatz is, using the quantum computer. Testing the ansatz equates to minimising a cost function. We feed the result of this cost function back to the classical computer, and use some classical optimisers to improve on our ansatz, i.e. our initial guesses for a and b. We repeat this process until the ansatz is good enough within some tolerance. ![title](images/vlinreg_circuit.png) ## Translate to Quantum Domain We now need to explore how we will translate the data set, y, onto a quantum computer. Let us think of y as a length N vector. 
The easiest way to encode this data set onto a quantum computer is by initialising qubits in the state $\ket{y}$, where $$\ket{y} = \frac{1}{C_y}\vec{y}$$ and $C_y$ is a normalisation factor. Now we propose a trial solution, or ansatz, which is parametrised by a and b, as follows: $$\ket{\Phi} = \frac{1}{C_{\Phi}}(a\vec{x} + b)$$ where $C_{\Phi}$ is again a normalisation factor. Due to the definition of the tensor product and the fact that the general statevector of a single qubit is a vector of length 2, $n$ qubits can encode length-$2^n$ vectors. ### Cost Function Our proposed cost function, which we wish to minimise is equal to $$C_P = \big(1 - \braket{y}{\Phi}\big)^2$$ This computes the normalised fidelity (similarity) of $\ket{y}$ and $\ket{\Phi}$. We see that if $\ket{y}$ and $\ket{\Phi}$ are equal, our cost function will equal 0, otherwise it will be greater than 0. Thus, we need to compute this cost function with our quantum hardware, and couple it with classical minimising algorithms. ### Computing Inner Products on a Quantum Computer It is clear we now need a quantum algorithm for computing inner products. Let us go through the theory of computing the inner product $\braket{x}{y}$ here, which will be translated to quantum hardware in a couple of sections. Firstly, assume we have a state: $$ \ket{\phi} = \frac{1}{\sqrt{2}}\big(\ket{0}\ket{x} + \ket{1}\ket{y}\big) $$ where we want to find the inner product, $\braket{x}{y}$. 
Applying a Hadamard gate on the first qubit, we find: $$ \ket{\tilde{\phi}} = \frac{1}{2}\Big(\ket{0}\big(\ket{x}+\ket{y}\big) + \ket{1}\big(\ket{x}-\ket{y}\big)\Big) $$ This means that the probability to measure the first qubit as $\ket{0}$ in the computational basis equals: $$ P(0) = \frac{1}{2}\Big(1+Re\big[\braket{x}{y}\big]\Big) $$ This follows because: $$ \begin{aligned} P(0) &= \Big|\bra{0}\otimes\mathbb{1}\ket{\tilde{\phi}}\Big|^2 \\ &= \frac{1}{4}\Big|\ket{x}+\ket{y}\Big|^2 \\ &= \frac{1}{4}\big(\braket{x}{x}+\braket{x}{y}+\braket{y}{x}+\braket{y}{y}\big) \\ &= \frac{1}{4}\Big(2 + 2 Re\big[\braket{x}{y}\big]\Big) \\ &= \frac{1}{2}\Big(1+Re\big[\braket{x}{y}\big]\Big) \end{aligned} $$ After a simple rearrangement, we see that $$Re\big[\braket{x}{y}\big] = 2P(0) - 1$$ It follows from a similar logic that if we apply a phase rotation on our initial state: $$ \ket{\phi} = \frac{1}{\sqrt{2}}\big(\ket{0}\ket{x} -i \ket{1}\ket{y}\big) $$ then the probability of the same measurement: $$ P(0) = \frac{1}{2}\Big(1+Im\big[\braket{x}{y}\big]\Big) $$ We can then combine both probabilities to find the true $\braket{x}{y}$. For this work, we assume that our states are fully real, and so just need the first measurement. ## Code Implementation - Theoretical Approach It should be noted here that qiskit orders its qubits with the last qubit corresponding to the left of the tensor product. For this run through, we are computing the inner product of length-8 vectors. 
Thus, we require 4 qubits ($8 + 8 = 16 = 2^4$) to encode the state: $$ \begin{aligned} \ket{\phi} &= \frac{1}{\sqrt{2}}(\ket{0}\ket{x} + \ket{1}\ket{y}) \\ &= \frac{1}{\sqrt{2}}\left(\begin{bmatrix}1\\0\end{bmatrix}\otimes\begin{bmatrix}x_1\\x_2\\\vdots\\x_n \end{bmatrix} +\begin{bmatrix}0\\1\end{bmatrix}\otimes\begin{bmatrix}y_1\\y_2\\\vdots\\y_n \end{bmatrix} \right) \\ &= \frac{1}{\sqrt{2}}\left(\begin{bmatrix}x_1\\x_2\\\vdots\\x_n \\y_1\\y_2\\\vdots\\y_n \end{bmatrix} \right) \end{aligned} $$ Finally, in order to measure the probability of measuring the bottom (leftmost) qubit as $\ket{0}$ in the computational basis, we can find the exact theoretical value by finding the resultant statevector and summing up the amplitude squared of the first $2^{n-1}$ entries (i.e. half of them). On a real quantum computer, we would just have to perform the actual measurement many times over, and compute the probability that way. We will show the theoretical approach in practice first. ``` # importing necessary packages import qiskit from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit import Aer, execute import math import random import numpy as np import matplotlib.pyplot as plt from scipy.optimize import minimize ``` Now, let's draw the required diagram for theoretically computing the inner product of any two states. Note that the only difference between this circuit diagram and the real, practical diagram for actually running on a quantum computer is that we do not measure the left-most qubit in the computational basis. Again, note that the left-most qubit corresponds to the bottom qubit. 
``` x = np.arange(0,8,1) # define some vectors x and y y = x N = len(x) nqubits = math.ceil(np.log2(N)) # compute how many qubits needed to encode either x or y xnorm = np.linalg.norm(x) # normalise vectors x and y ynorm = np.linalg.norm(y) x = x/xnorm y = y/ynorm circ = QuantumCircuit(nqubits+1) # create circuit vec = np.concatenate((x,y))/np.sqrt(2) # concatenate x and y as above, with renormalisation circ.initialize(vec, range(nqubits+1)) circ.h(nqubits) # apply hadamard to bottom qubit circ.draw() # draw the circuit ``` Now let's build a function around this circuit, so that we can theoretically compute the inner product between any two normalised vectors. ``` #Creates a quantum circuit to calculate the inner product between two normalised vectors def inner_prod(vec1, vec2): #first check lengths are equal if len(vec1) != len(vec2): raise ValueError('Lengths of states are not equal') circ = QuantumCircuit(nqubits+1) vec = np.concatenate((vec1,vec2))/np.sqrt(2) circ.initialize(vec, range(nqubits+1)) circ.h(nqubits) backend = Aer.get_backend('statevector_simulator') job = execute(circ, backend, backend_options = {"zero_threshold": 1e-20}) result = job.result() o = np.real(result.get_statevector(circ)) m_sum = 0 for l in range(N): m_sum += o[l]**2 return 2*m_sum-1 x = np.arange(0,8,1) y = x N = len(x) nqubits = math.ceil(np.log2(N)) xnorm = np.linalg.norm(x) ynorm = np.linalg.norm(y) x = x/xnorm y = y/ynorm print("x: ", x) print() print("y: ", y) print() print("The inner product of x and y equals: ", inner_prod(x,y)) ``` Now, let's build a function to compute the cost function associated with any choice of a and b. We have set up x and y such that the correct parameters are (a,b) = (1,0). 
``` #Implements the entire cost function by feeding the ansatz to the quantum circuit which computes inner products def calculate_cost_function(parameters): a, b = parameters ansatz = a*x + b # compute ansatz ansatzNorm = np.linalg.norm(ansatz) # normalise ansatz ansatz = ansatz/ansatzNorm y_ansatz = ansatzNorm/ynorm * inner_prod(y,ansatz) # use quantum circuit to test ansatz # note the normalisation factors return (1-y_ansatz)**2 x = np.arange(0,8,1) y = x N = len(x) nqubits = math.ceil(np.log2(N)) ynorm = np.linalg.norm(y) y = y/ynorm a = 1.0 b = 1.0 print("Cost function for a =", a, "and b =", b, "equals:", calculate_cost_function([a,b])) ``` Now putting everything together and using a classical optimiser from the scipy library, we get the full code. ``` #first set up the data sets x and y x = np.arange(0,8,1) y = x # + [random.uniform(-1,1) for p in range(8)] # can add noise here N = len(x) nqubits = math.ceil(np.log2(N)) ynorm = np.linalg.norm(y) # normalise the y data set y = y/ynorm x0 = [0.5,0.5] # initial guess for a and b #now use different classical optimisers to see which one works best out = minimize(calculate_cost_function, x0=x0, method="BFGS", options={'maxiter':200}, tol=1e-6) out1 = minimize(calculate_cost_function, x0=x0, method="COBYLA", options={'maxiter':200}, tol=1e-6) out2 = minimize(calculate_cost_function, x0=x0, method="Nelder-Mead", options={'maxiter':200}, tol=1e-6) out3 = minimize(calculate_cost_function, x0=x0, method="CG", options={'maxiter':200}, tol=1e-6) out4 = minimize(calculate_cost_function, x0=x0, method="trust-constr", options={'maxiter':200}, tol=1e-6) out_a1 = out1['x'][0] out_b1 = out1['x'][1] out_a = out['x'][0] out_b = out['x'][1] out_a2 = out2['x'][0] out_b2 = out2['x'][1] out_a3 = out3['x'][0] out_b3 = out3['x'][1] out_a4 = out4['x'][0] out_b4 = out4['x'][1] plt.scatter(x,y*ynorm) xfit = np.linspace(min(x), max(x), 100) plt.plot(xfit, out_a*xfit+out_b, label='BFGS') plt.plot(xfit, out_a1*xfit+out_b1, label='COBYLA') 
plt.plot(xfit, out_a2*xfit+out_b2, label='Nelder-Mead') plt.plot(xfit, out_a3*xfit+out_b3, label='CG') plt.plot(xfit, out_a4*xfit+out_b4, label='trust-constr') plt.legend() plt.title("y = x") plt.xlabel("x") plt.ylabel("y") plt.show() ``` ## Code Implementation - Practical Approach In order to modify the above slightly so that it can be run on a real quantum computer, we simply have to modify the `inner_prod` function. Instead of theoretically extracting the probabilility of measuring a 0 on the leftmost qubit in the computational basis, we must actually measure this qubit a number of times and calculate the probability from these samples. Our new circuit can be created as follows, which is identical to the theoretical circuit, but we just add a measurement, and hence need a classical bit. ``` x = np.arange(0,8,1) # define some vectors x and y y = x N = len(x) nqubits = math.ceil(np.log2(N)) # compute how many qubits needed to encode either x or y xnorm = np.linalg.norm(x) # normalise vectors x and y ynorm = np.linalg.norm(y) x = x/xnorm y = y/ynorm circ = QuantumCircuit(nqubits+1,1) # create circuit vec = np.concatenate((x,y))/np.sqrt(2) # concatenate x and y as above, with renormalisation circ.initialize(vec, range(nqubits+1)) circ.h(nqubits) # apply hadamard to bottom qubit circ.measure(nqubits,0) # measure bottom qubit in computational basis circ.draw() # draw the circuit ``` Now, we can build a new inner_prod function around this circuit, using a different simulator from qiskit. 
```
#Creates quantum circuit which calculates the inner product between two normalised vectors
def inner_prod(vec1, vec2):
    # Returns an estimate of Re[<vec1|vec2>] by sampling the ancilla qubit:
    # P(0) = (1 + Re[<vec1|vec2>]) / 2, hence the final 2*P(0) - 1.
    # NOTE(review): relies on the module-level global `nqubits` being set to
    # ceil(log2(len(vec1))) before this is called, and assumes vec1/vec2 are
    # already normalised — TODO confirm at call sites.
    #first check lengths are equal
    if len(vec1) != len(vec2):
        raise ValueError('Lengths of states are not equal')

    # One extra (ancilla) qubit plus one classical bit for its measurement.
    circ = QuantumCircuit(nqubits+1,1)
    # Prepare (|0>|vec1> + |1>|vec2>) / sqrt(2) as a single statevector.
    vec = np.concatenate((vec1,vec2))/np.sqrt(2)
    circ.initialize(vec, range(nqubits+1))
    circ.h(nqubits)
    circ.measure(nqubits,0)

    # Sample the circuit 20000 times on the QASM simulator to estimate P(0).
    backend = Aer.get_backend('qasm_simulator')
    job = execute(circ, backend, shots=20000)
    result = job.result()
    outputstate = result.get_counts(circ)

    # If '0' was never observed its key is absent from the counts dict.
    if ('0' in outputstate.keys()):
        m_sum = float(outputstate["0"])/20000
    else:
        m_sum = 0

    return 2*m_sum-1

# Demo: estimate <x|y> for two identical normalised ramps (exact value is 1).
x = np.arange(0,8,1)
y = x

N = len(x)
nqubits = math.ceil(np.log2(N))

xnorm = np.linalg.norm(x)
ynorm = np.linalg.norm(y)

x = x/xnorm
y = y/ynorm

print("x: ", x)
print()
print("y: ", y)
print()
print("The inner product of x and y equals: ", inner_prod(x,y))
```

Our cost function calculation is the same as before, but we now just use this new method for computing the inner product, so the full code can be run as follows.
``` #first set up the data sets x and y x = np.arange(0,8,1) y = x # + [random.uniform(-1,1) for p in range(8)] # can add noise here N = len(x) nqubits = math.ceil(np.log2(N)) ynorm = np.linalg.norm(y) # normalise y data set y = y/ynorm x0 = [0.5,0.5] # initial guess for a and b #now use different classical optimisers to see which one works best out = minimize(calculate_cost_function, x0=x0, method="BFGS", options={'maxiter':200}, tol=1e-6) out1 = minimize(calculate_cost_function, x0=x0, method="COBYLA", options={'maxiter':200}, tol=1e-6) out2 = minimize(calculate_cost_function, x0=x0, method="Nelder-Mead", options={'maxiter':200}, tol=1e-6) out3 = minimize(calculate_cost_function, x0=x0, method="CG", options={'maxiter':200}, tol=1e-6) out4 = minimize(calculate_cost_function, x0=x0, method="trust-constr", options={'maxiter':200}, tol=1e-6) out_a1 = out1['x'][0] out_b1 = out1['x'][1] out_a = out['x'][0] out_b = out['x'][1] out_a2 = out2['x'][0] out_b2 = out2['x'][1] out_a3 = out3['x'][0] out_b3 = out3['x'][1] out_a4 = out4['x'][0] out_b4 = out4['x'][1] plt.scatter(x,y*ynorm) xfit = np.linspace(min(x), max(x), 100) plt.plot(xfit, out_a*xfit+out_b, label='BFGS') plt.plot(xfit, out_a1*xfit+out_b1, label='COBYLA') plt.plot(xfit, out_a2*xfit+out_b2, label='Nelder-Mead') plt.plot(xfit, out_a3*xfit+out_b3, label='CG') plt.plot(xfit, out_a4*xfit+out_b4, label='trust-constr') plt.legend() plt.title("y = x") plt.xlabel("x") plt.ylabel("y") plt.show() ``` ## Extending to Higher Order Fits We can also extend to fitting to quadratic, cubic, and higher order polynomials. The code remains relatively unchanged, but will update the cost function slightly. We can of course use either the theoretical or practical method for computing the inner products in the following cost function. 
We are now fitting to an n$^{th}$-order polynomial: $$y = a_0+ a_1 x + a_2 x^2 + \dots + a_n x^n$$ ``` # New cost function calculation, allowing for higher order polynomials # Implements the entire cost function by feeding the ansatz to the quantum circuit which computes inner products def calculate_cost_function_n(parameters): ansatz = parameters[0] # compute ansatz for i in range(1,len(parameters)): ansatz += parameters[i] * x**i ansatzNorm = np.linalg.norm(ansatz) # normalise ansatz ansatz = ansatz/ansatzNorm y_ansatz = ansatzNorm/ynorm * inner_prod(y,ansatz) # use quantum circuit to test ansatz # note the normalisation factors return (1-y_ansatz)**2 #first set up the data sets x and y x = np.arange(0,8,1) y = (2*x-1)**3 + [random.uniform(-1,1) for p in range(8)] N = len(x) nqubits = math.ceil(np.log2(N)) ynorm = np.linalg.norm(y) #normalise y data set y = y/ynorm order = 3 x0 = [random.uniform(0,2) for p in range(order+1)] #random initial guess for a and b #now use different classical optimisers to see which one works best out = minimize(calculate_cost_function_n, x0=x0, method="BFGS", options={'maxiter':200}, tol=1e-6) out1 = minimize(calculate_cost_function_n, x0=x0, method="COBYLA", options={'maxiter':200}, tol=1e-6) out2 = minimize(calculate_cost_function_n, x0=x0, method="Nelder-Mead", options={'maxiter':200}, tol=1e-6) out3 = minimize(calculate_cost_function_n, x0=x0, method="CG", options={'maxiter':200}, tol=1e-6) out4 = minimize(calculate_cost_function_n, x0=x0, method="trust-constr", options={'maxiter':200}, tol=1e-6) class_fit = np.polyfit(x,y*ynorm,order) class_fit = class_fit[::-1] xfit = np.linspace(min(x), max(x), 100) def return_fits(xfit): c_fit = np.zeros(100) q_fit = np.zeros(100) q_fit1 = np.zeros(100) q_fit2 = np.zeros(100) q_fit3 = np.zeros(100) q_fit4 = np.zeros(100) for i in range(order+1): c_fit += xfit**i*class_fit[i] q_fit += xfit**i*out['x'][i] q_fit1 += xfit**i*out1['x'][i] q_fit2 += xfit**i*out2['x'][i] q_fit3 += 
xfit**i*out3['x'][i] q_fit4 += xfit**i*out4['x'][i] return c_fit, q_fit, q_fit1, q_fit2, q_fit3, q_fit4 c_fit, q_fit, q_fit1, q_fit2, q_fit3, q_fit4 = return_fits(xfit) plt.scatter(x,y*ynorm) xfit = np.linspace(min(x), max(x), 100) plt.plot(xfit, c_fit, label='Classical') plt.plot(xfit, q_fit, label='BFGS') plt.plot(xfit, q_fit1, label='COBYLA') plt.plot(xfit, q_fit2, label='Nelder-Mead') plt.plot(xfit, q_fit3, label='CG') plt.plot(xfit, q_fit4, label='trust-constr') plt.legend() plt.title("$y = (2x-1)^3$ + Random Perturbation") plt.xlabel("x") plt.ylabel("y") plt.show() ``` ## Acknowledgements I would like to thank Dr. Lee O'Riordan for his supervision and guidance on this work. The work was mainly inspired by work presented in the research paper "Variational Quantum Linear Solver: A Hybrid Algorithm for Linear Systems", written by Carlos Bravo-Prieto, Ryan LaRose, M. Cerezo, Yiğit Subaşı, Lukasz Cincio, and Patrick J. Coles, which is available at this [link](https://arxiv.org/abs/1909.05820). I would also like to thank the Irish Centre for High End Computing for allowing me to access the national HPC infrastructure, Kay.
github_jupyter
# Cyberbullying model using XGBoost, Random Forest and SVC ``` import pandas as pd import numpy as np import string import nltk from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from nltk.corpus import wordnet from nltk import pos_tag from nltk.tokenize import word_tokenize from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split from xgboost import XGBClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV ``` ## Preprocessing the dataset ``` lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) stop_words.update(list(string.punctuation)) df = pd.read_csv("anti-bully-data.csv") df.head() messages = df['text_message'] y = df['label_bullying'] def get_simple_pos(tag) : if tag.startswith('J') : return wordnet.ADJ elif tag.startswith('V') : return wordnet.VERB elif tag.startswith('N') : return wordnet.NOUN elif tag.startswith('R') : return wordnet.ADV else: return wordnet.NOUN def clean_text(review) : global max_len words = word_tokenize(review) output_words = [] for word in words : if word.lower() not in stop_words : pos = pos_tag([word]) clean_word = lemmatizer.lemmatize(word,pos = get_simple_pos(pos[0][1])) output_words.append(clean_word.lower()) max_len = max(max_len, len(output_words)) return " ".join(output_words) max_len = 0 messages = [clean_text(message) for message in messages] print(messages[0]) X_train, X_test, Y_train, Y_test = train_test_split(messages, y, random_state = 0, test_size = 0.15) len(X_train) CountVec = CountVectorizer(max_df = 0.8,min_df = 0.0005) X_train = CountVec.fit_transform(X_train) X_test = CountVec.transform(X_test) ``` ## The XGBoost model ``` xgb = XGBClassifier() xgb.fit(X_train, Y_train) print(xgb.score(X_train, Y_train)) print(xgb.score(X_test, Y_test)) ``` ## The Random Forest model ``` rfc = RandomForestClassifier() rfc.fit(X_train, Y_train) 
print(rfc.score(X_train, Y_train)) print(rfc.score(X_test, Y_test)) ``` ## The Support Vector Classifier model ``` svc = SVC(kernel = 'rbf') svc.fit(X_train, Y_train) print(svc.score(X_train, Y_train)) print(svc.score(X_test, Y_test)) params = {'C' : [1e2,1e3,5e3,1e4,5,5e4,1e5],'gamma':[1e-3,5e-4,1e-4,5e-3]} grid = GridSearchCV(svc,params) grid.fit(X_train, Y_train) print(grid.score(X_train, Y_train)) print(grid.score(X_test, Y_test)) ``` # Accuracy of each model ### 1. XGBoost: 71.35% ### 2. Random Forest:66.82% ### 3. SVC: 71.50
github_jupyter
``` import cupy as cp import cusignal from scipy import signal import numpy as np ``` ### Generate Sinusodial Signals with N Carriers **On CPU where**: * fs = sample rate of signal * freq = list of carrier frequencies * N = number of points in signal ``` def cpu_gen_signal(fs, freq, N): T = 1/fs sig = 0 x = np.linspace(0.0, N*(1.0/fs), N) for f in freq: sig += np.cos(f*2*cp.pi*x) return sig def cpu_gen_ensemble(fs, N, num_sig): sig_ensemble = np.zeros((int(num_sig), int(N))) for i in range(int(num_sig)): # random number of carriers in random locations for each signal freq = 1e6 * np.random.randint(1, 10, np.random.randint(1,5)) sig_ensemble[i,:] = cpu_gen_signal(fs, freq, N) return sig_ensemble ``` **On GPU** Please note, first run of GPU functions includes setting up memory and 'pre-warming' the GPU. For accurate performance and benchmarking each cell is typically run multiple times. ``` def gpu_gen_signal(fs, freq, N): T = 1/fs sig = 0 x = cp.linspace(0.0, N*(1.0/fs), N) for f in freq: sig += cp.cos(f*2*cp.pi*x) return sig # Storing num carriers for deep learning prediction -- We're even HURTING ourself here with benchmarks! def gpu_gen_ensemble(fs, N, num_sig): sig_ensemble = cp.zeros((int(num_sig), int(N))) num_carriers = cp.zeros(int(num_sig)) for i in range(int(num_sig)): # random number of carriers in random locations for each signal num_carrier = int(cp.random.randint(1,5)) freq = 1e6 * cp.random.randint(1, 10, num_carrier) sig_ensemble[i,:] = gpu_gen_signal(fs, freq, N) num_carriers[i] = num_carrier return sig_ensemble, num_carriers ``` Generate a bunch of different signals with arbitrary carrier frequencies. 
Allow user to select number of signals, sample frequency of the ensemble, and number of points in the signal ``` #10MHz fs = 10e6 # Overwrite num_sig = 2000 N = 2**15 # Change sample rate so N=2^16 up = 2 down = 1 cpu_ensemble = cpu_gen_ensemble(fs, N, num_sig) [gpu_ensemble, num_carriers] = gpu_gen_ensemble(fs, N, num_sig) ``` ### Resample Ensemble - Use Polyphase Resampler to upsample by 2 **On CPU** ``` %%time resample_cpu_ensemble = signal.resample_poly(cpu_ensemble, up, down, axis=1, window='flattop') ``` **On GPU** ``` %%time resample_gpu_ensemble = cusignal.resample_poly(gpu_ensemble, up, down, axis=1, window='flattop') ``` ### Run Periodogram with Flattop Filter over Each Row of Ensemble **On CPU** ``` %%time cf, cPxx_den = signal.periodogram(resample_cpu_ensemble, fs, 'flattop', scaling='spectrum', axis=1) ``` **On GPU** ``` %%time gf, gPxx_den = cusignal.periodogram(resample_gpu_ensemble, fs, 'flattop', scaling='spectrum', axis=1) ``` ### Visualize Output **On CPU** ``` %matplotlib inline import matplotlib.pyplot as plt plt.semilogy(cf, cPxx_den[0,:]) plt.show() ``` **On GPU** ``` import matplotlib.pyplot as plt plt.semilogy(cp.asnumpy(gf), cp.asnumpy(gPxx_den[0,:])) plt.show() ``` ### Move to PyTorch to try to 'predict' number of carriers in signal ``` # Uncomment the line below to ensure PyTorch is installed. # PyTorch is intentionally excluded from our Docker images due to its size. # Alternatively, the docker image can be run with the following variable: # docker run -e EXTRA_CONDA_PACKAGES="-c pytorch pytorch"... 
#!conda install -y -c pytorch pytorch import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import torch.nn.functional as F device = torch.device("cuda:0") #90 percent of dataset for training training_idx_max = int(0.9*gPxx_den.shape[0]) gPxx_den = gPxx_den.astype(cp.float32) num_carriers = num_carriers.astype(cp.int64) # Zero copy memory from cupy to DLPack to Torch x = torch.as_tensor(gPxx_den[0:training_idx_max,:], device=device) y = torch.as_tensor(num_carriers[0:training_idx_max], device=device) # Test x_t = torch.as_tensor(gPxx_den[training_idx_max:gPxx_den.shape[0],:], device=device) y_t = torch.as_tensor(num_carriers[training_idx_max:gPxx_den.shape[0]], device=device) # Number of possible carriers output_size = 10 epochs = 75 batch_size = 10 learning_rate = 1e-2 class Network(nn.Module): def __init__(self): super(Network, self).__init__() self.l1 = nn.Linear(x.shape[1], 1500) self.relu = nn.ReLU() self.l3 = nn.Linear(1500, 750) self.relu = nn.ReLU() self.l5 = nn.Linear(750, output_size) def forward(self, x): x = self.l1(x) x = self.relu(x) x = self.l3(x) x = self.relu(x) x = self.l5(x) return F.log_softmax(x, dim=1) net = Network().to(device) optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.5) loss_log = [] for e in range(epochs): for i in range(0, x.shape[0], batch_size): x_mini = x[i:i + batch_size] y_mini = y[i:i + batch_size] x_var = Variable(x_mini) y_var = Variable(y_mini) optimizer.zero_grad() net_out = net(x_var) loss = F.nll_loss(net_out, y_var) loss.backward() optimizer.step() if i % 100 == 0: loss_log.append(loss.data) print('Epoch: {} - Loss: {:.6f}'.format(e, loss.data)) ``` **Measure Inference Accuracy on Test Set** ``` test_loss = 0 correct = 0 for i in range(x_t.shape[0]): pred = net(x_t[i,:].expand(1,-1)).argmax() correct += pred.eq(y_t[i].view_as(pred)).sum().item() print('Accuracy: ', 100. 
* correct / x_t.shape[0]) ``` **Save Model** ``` checkpoint = {'net': Network(), 'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict()} torch.save(checkpoint,"E2E_sig_proc.pt") ``` **Load Model** ``` checkpoint = torch.load('E2E_sig_proc.pt') checkpoint.keys() ``` **Generate New Signal and Look at Inferencing Power** ``` num_carrier = 2 freq = 1e6 * cp.random.randint(1, 10, num_carrier) sig = gpu_gen_signal(fs, freq, N) r_sig = cusignal.resample_poly(sig, up, down, window='flattop') f, Pxx = cusignal.periodogram(r_sig, fs, 'flattop', scaling='spectrum') x = torch.as_tensor(Pxx.astype(cp.float32), device=device) pred_num_carrier = net(x.expand(1,-1)).argmax().item() print(pred_num_carrier) ```
github_jupyter
# Node elevations and edge grades Author: [Geoff Boeing](https://geoffboeing.com/) - [Overview of OSMnx](http://geoffboeing.com/2016/11/osmnx-python-street-networks/) - [GitHub repo](https://github.com/gboeing/osmnx) - [Examples, demos, tutorials](https://github.com/gboeing/osmnx-examples) - [Documentation](https://osmnx.readthedocs.io/en/stable/) - [Journal article/citation](http://geoffboeing.com/publications/osmnx-complex-street-networks/) OSMnx allows you to automatically add elevation attributes to your graph's nodes with the `elevation` module, using either local raster files or the Google Maps Elevation API as the elevation data source. If you use the Google API, you will need an API key. Once your nodes have elevation values, OSMnx can automatically calculate your edges' grades (inclines). ``` import sys import numpy as np import osmnx as ox import pandas as pd %matplotlib inline ox.__version__ ``` ## Elevation from local raster file(s) OSMnx can attach elevations to graph nodes using either a single raster file or a list of raster files. The latter creates a virtual raster VRT composed of the rasters at those filepaths. By default, it uses all available CPUs but you can configure this with an argument. 
``` address = "600 Montgomery St, San Francisco, California, USA" G = ox.graph_from_address(address=address, dist=500, dist_type="bbox", network_type="bike") # add node elevations from a single raster file # some nodes will be null because the single file does not cover the graph's extents raster_path = "./input_data/elevation1.tif" G = ox.elevation.add_node_elevations_raster(G, raster_path, cpus=1) # add node elevations from multiple raster files # no nulls should remain raster_paths = ["./input_data/elevation1.tif", "./input_data/elevation2.tif"] G = ox.elevation.add_node_elevations_raster(G, raster_paths) assert not np.isnan(np.array(G.nodes(data="elevation"))[:, 1]).any() # add edge grades and their absolute values G = ox.elevation.add_edge_grades(G, add_absolute=True) ``` ## Elevation from Google Maps Elevation API You will need a Google Maps Elevation [API key](https://developers.google.com/maps/documentation/elevation/start). Consider your API usage limits. OSMnx rounds coordinates to 5 decimal places (approx 1 meter) to fit 350 locations in a batch. Note that there is some spatial inaccuracy given Google's dataset's resolution. For example, in San Francisco (where the resolution is 19 meters) a couple of edges in hilly parks have a 50+ percent grade because Google assigns one of their nodes the elevation of a hill adjacent to the street. ``` # replace this with your own API key! 
try: from keys import google_elevation_api_key except ImportError: sys.exit() # you need an API key to proceed # get the street network for san francisco place = "San Francisco" place_query = {"city": "San Francisco", "state": "California", "country": "USA"} G = ox.graph_from_place(place_query, network_type="drive") # add elevation to each of the nodes, using the google elevation API, then calculate edge grades G = ox.elevation.add_node_elevations_google(G, api_key=google_elevation_api_key) G = ox.elevation.add_edge_grades(G) ``` ## Calculate some summary stats Use an undirected representation of the network so we don't overcount two-way streets (because they have reciprocal edges pointing in each direction). We use the absolute value of edge grade because we're interested in steepness, not directionality. ``` # calculate the edges' absolute grades (and drop any infinite/null values) grades = pd.Series([d["grade_abs"] for _, _, d in ox.get_undirected(G).edges(data=True)]) grades = grades.replace([np.inf, -np.inf], np.nan).dropna() avg_grade = np.mean(grades) print("Average street grade in {} is {:.1f}%".format(place, avg_grade * 100)) med_grade = np.median(grades) print("Median street grade in {} is {:.1f}%".format(place, med_grade * 100)) ``` ## Plot the nodes by elevation Plot them colored from low (violet) to high (yellow). ``` # get one color for each node, by elevation, then plot the network nc = ox.plot.get_node_colors_by_attr(G, "elevation", cmap="plasma") fig, ax = ox.plot_graph(G, node_color=nc, node_size=5, edge_color="#333333", bgcolor="k") ``` ## Plot the edges by grade Grade is the ratio of elevation change to edge length. Plot edges colored from low/flat (violet) to high/steep (yellow). 
``` # get a color for each edge, by grade, then plot the network ec = ox.plot.get_edge_colors_by_attr(G, "grade_abs", cmap="plasma", num_bins=5, equal_size=True) fig, ax = ox.plot_graph(G, edge_color=ec, edge_linewidth=0.5, node_size=0, bgcolor="k") ``` ## Calculate shortest paths, considering grade impedance This example approximates the route of "The Wiggle" in San Francisco. ``` # select an origin and destination node and a bounding box around them origin = ox.distance.nearest_nodes(G, -122.426, 37.77) destination = ox.distance.nearest_nodes(G, -122.441, 37.773) bbox = ox.utils_geo.bbox_from_point((37.772, -122.434), dist=1500) # define some edge impedance function here def impedance(length, grade): penalty = grade ** 2 return length * penalty # add impedance and elevation rise values to each edge in the projected graph # use absolute value of grade in impedance function if you want to avoid uphill and downhill for _, _, _, data in G.edges(keys=True, data=True): data["impedance"] = impedance(data["length"], data["grade_abs"]) data["rise"] = data["length"] * data["grade"] ``` #### First find the shortest path that minimizes *trip distance*: ``` route_by_length = ox.shortest_path(G, origin, destination, weight="length") fig, ax = ox.plot_graph_route(G, route_by_length, bbox=bbox, node_size=0) ``` #### Now find the shortest path that avoids slopes by minimizing *impedance* (function of length and grade): ``` route_by_impedance = ox.shortest_path(G, origin, destination, weight="impedance") fig, ax = ox.plot_graph_route(G, route_by_impedance, bbox=bbox, node_size=0) ``` #### Print some summary stats about these two routes: ``` def print_route_stats(route): route_grades = ox.utils_graph.get_route_edge_attributes(G, route, "grade_abs") msg = "The average grade is {:.1f}% and the max is {:.1f}%" print(msg.format(np.mean(route_grades) * 100, np.max(route_grades) * 100)) route_rises = ox.utils_graph.get_route_edge_attributes(G, route, "rise") ascent = np.sum([rise for 
rise in route_rises if rise >= 0]) descent = np.sum([rise for rise in route_rises if rise < 0]) msg = "Total elevation change is {:.1f} meters: {:.0f} meter ascent and {:.0f} meter descent" print(msg.format(np.sum(route_rises), ascent, abs(descent))) route_lengths = ox.utils_graph.get_route_edge_attributes(G, route, "length") print("Total trip distance: {:,.0f} meters".format(np.sum(route_lengths))) # stats of route minimizing length print_route_stats(route_by_length) # stats of route minimizing impedance (function of length and grade) print_route_stats(route_by_impedance) ``` So, we decreased the average slope along the route from a 5% grade to a 2% grade. The total elevation change is obviously (approximately, due to rounding) the same with either route, but using our impedance function we decrease the total ascent from 69 meters to 40 meters (but the trip distance increases from 1.9 km to 2.6 km).
github_jupyter
# Bayesian Estimation Supersedes the T-Test ``` %matplotlib inline import numpy as np import pymc3 as pm import pandas as pd import matplotlib.pyplot as plt plt.style.use('seaborn-darkgrid') print('Running on PyMC3 v{}'.format(pm.__version__)) ``` This model replicates the example used in: Kruschke, John. (2012) **Bayesian estimation supersedes the t-test**. *Journal of Experimental Psychology*: General. ### The Problem Several statistical inference procedures involve the comparison of two groups. We may be interested in whether one group is larger than another, or simply different from the other. We require a statistical model for this because true differences are usually accompanied by measurement or stochastic noise that prevent us from drawing conclusions simply from differences calculated from the observed data. The *de facto* standard for statistically comparing two (or more) samples is to use a statistical test. This involves expressing a null hypothesis, which typically claims that there is no difference between the groups, and using a chosen test statistic to determine whether the distribution of the observed data is plausible under the hypothesis. This rejection occurs when the calculated test statistic is higher than some pre-specified threshold value. Unfortunately, it is not easy to conduct hypothesis tests correctly, and their results are very easy to misinterpret. Setting up a statistical test involves several subjective choices (*e.g.* statistical test to use, null hypothesis to test, significance level) by the user that are rarely justified based on the problem or decision at hand, but rather, are usually based on traditional choices that are entirely arbitrary (Johnson 1999). The evidence that it provides to the user is indirect, incomplete, and typically overstates the evidence against the null hypothesis (Goodman 1999). 
A more informative and effective approach for comparing groups is one based on **estimation** rather than **testing**, and is driven by Bayesian probability rather than frequentist. That is, rather than testing whether two groups are different, we instead pursue an estimate of how different they are, which is fundamentally more informative. Moreover, we include an estimate of uncertainty associated with that difference which includes uncertainty due to our lack of knowledge of the model parameters (epistemic uncertainty) and uncertainty due to the inherent stochasticity of the system (aleatory uncertainty). ## Example: Drug trial evaluation To illustrate how this Bayesian estimation approach works in practice, we will use a fictitious example from Kruschke (2012) concerning the evaluation of a clinical trial for drug evaluation. The trial aims to evaluate the efficacy of a "smart drug" that is supposed to increase intelligence by comparing IQ scores of individuals in a treatment arm (those receiving the drug) to those in a control arm (those recieving a placebo). There are 47 individuals and 42 individuals in the treatment and control arms, respectively. ``` drug = (101,100,102,104,102,97,105,105,98,101,100,123,105,103,100,95,102,106, 109,102,82,102,100,102,102,101,102,102,103,103,97,97,103,101,97,104, 96,103,124,101,101,100,101,101,104,100,101) placebo = (99,101,100,101,102,100,97,101,104,101,102,102,100,105,88,101,100, 104,100,100,100,101,102,103,97,101,101,100,101,99,101,100,100, 101,100,99,101,100,102,99,100,99) y1 = np.array(drug) y2 = np.array(placebo) y = pd.DataFrame(dict(value=np.r_[y1, y2], group=np.r_[['drug']*len(drug), ['placebo']*len(placebo)])) y.hist('value', by='group', figsize=(12, 4)); ``` The first step in a Bayesian approach to inference is to specify the full probability model that corresponds to the problem. For this example, Kruschke chooses a Student-t distribution to describe the distributions of the scores in each group. 
This choice adds robustness to the analysis, as a T distribution is less sensitive to outlier observations, relative to a normal distribution. The three-parameter Student-t distribution allows for the specification of a mean $\mu$, a precision (inverse-variance) $\lambda$ and a degrees-of-freedom parameter $\nu$: $$f(x|\mu,\lambda,\nu) = \frac{\Gamma(\frac{\nu + 1}{2})}{\Gamma(\frac{\nu}{2})} \left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}} \left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}$$ the degrees-of-freedom parameter essentially specifies the "normality" of the data, since larger values of $\nu$ make the distribution converge to a normal distribution, while small values (close to zero) result in heavier tails. Thus, the likelihood functions of our model are specified as follows: $$y^{(treat)}_i \sim T(\nu, \mu_1, \sigma_1)$$ $$y^{(placebo)}_i \sim T(\nu, \mu_2, \sigma_2)$$ As a simplifying assumption, we will assume that the degree of normality $\nu$ is the same for both groups. We will, of course, have separate parameters for the means $\mu_k, k=1,2$ and standard deviations $\sigma_k$. Since the means are real-valued, we will apply normal priors on them, and arbitrarily set the hyperparameters to the pooled empirical mean of the data and twice the pooled empirical standard deviation, which applies very diffuse information to these quantities (and importantly, does not favor one or the other *a priori*). $$\mu_k \sim N(\bar{x}, 2s)$$ ``` μ_m = y.value.mean() μ_s = y.value.std() * 2 with pm.Model() as model: group1_mean = pm.Normal('group1_mean', mu=μ_m, sd=μ_s) group2_mean = pm.Normal('group2_mean', mu=μ_m, sd=μ_s) ``` The group standard deviations will be given a uniform prior over a plausible range of values for the variability of the outcome variable, IQ. 
In Kruschke's original model, he uses a very wide uniform prior for the group standard deviations, from the pooled empirical standard deviation divided by 1000 to the pooled standard deviation multiplied by 1000. This is a poor choice of prior, because very basic prior knowledge about measures of human cognition dictates that the variation cannot ever be as high as this upper bound. IQ is a standardized measure, and hence this constrains how variable a given population's IQ values can be. When you place such a wide uniform prior on these values, you are essentially putting a lot of prior weight on inadmissible values. In this example, there is little practical difference, but in general it is best to apply as much prior information as you have available to the parameterization of prior distributions. We will instead set the group standard deviations to have a $\text{Uniform}(1,10)$ prior: ``` σ_low = 1 σ_high = 10 with model: group1_std = pm.Uniform('group1_std', lower=σ_low, upper=σ_high) group2_std = pm.Uniform('group2_std', lower=σ_low, upper=σ_high) ``` We follow Kruschke by making the prior for $\nu$ exponentially distributed with a mean of 30; this allocates high prior probability over the regions of the parameter that describe the range from normal to heavy-tailed data under the Student-T distribution. ``` with model: ν = pm.Exponential('ν_minus_one', 1/29.) + 1 pm.kdeplot(np.random.exponential(30, size=10000), fill_kwargs={'alpha': 0.5}); ``` Since PyMC3 parameterizes the Student-T in terms of precision, rather than standard deviation, we must transform the standard deviations before specifying our likelihoods.
``` with model: λ1 = group1_std**-2 λ2 = group2_std**-2 group1 = pm.StudentT('drug', nu=ν, mu=group1_mean, lam=λ1, observed=y1) group2 = pm.StudentT('placebo', nu=ν, mu=group2_mean, lam=λ2, observed=y2) ``` Having fully specified our probabilistic model, we can turn our attention to calculating the comparisons of interest in order to evaluate the effect of the drug. To this end, we can specify deterministic nodes in our model for the difference between the group means and the difference between the group standard deviations. Wrapping them in named `Deterministic` objects signals to PyMC that we wish to record the sampled values as part of the output. As a joint measure of the groups, we will also estimate the "effect size", which is the difference in means scaled by the pooled estimates of standard deviation. This quantity can be harder to interpret, since it is no longer in the same units as our data, but the quantity is a function of all four estimated parameters. ``` with model: diff_of_means = pm.Deterministic('difference of means', group1_mean - group2_mean) diff_of_stds = pm.Deterministic('difference of stds', group1_std - group2_std) effect_size = pm.Deterministic('effect size', diff_of_means / np.sqrt((group1_std**2 + group2_std**2) / 2)) ``` Now, we can fit the model and evaluate its output. ``` with model: trace = pm.sample(2000) ``` We can plot the stochastic parameters of the model. PyMC's `plot_posterior` function replicates the informative histograms portrayed in Kruschke (2012). These summarize the posterior distributions of the parameters, and present a 95% credible interval and the posterior mean. The plots below are constructed with the final 1000 samples from each of the 2 chains, pooled together. 
``` pm.plot_posterior(trace, var_names=['group1_mean','group2_mean', 'group1_std', 'group2_std', 'ν_minus_one'], color='#87ceeb'); ``` Looking at the group differences, we can conclude that there are meaningful differences between the two groups for all three measures. For these comparisons, it is useful to use zero as a reference value (`ref_val`); providing this reference value yields cumulative probabilities for the posterior distribution on either side of the value. Thus, for the difference in means, 99.4% of the posterior probability is greater than zero, which suggests the group means are credibly different. The effect size and differences in standard deviation are similarly positive. These estimates suggest that the "smart drug" increased both the expected scores, but also the variability in scores across the sample. So, this does not rule out the possibility that some recipients may be adversely affected by the drug at the same time others benefit. ``` pm.plot_posterior(trace, var_names=['difference of means','difference of stds', 'effect size'], ref_val=0, color='#87ceeb'); ``` When `forestplot` is called on a trace with more than one chain, it also plots the potential scale reduction parameter, which is used to reveal evidence for lack of convergence; values near one, as we have here, suggest that the model has converged. ``` pm.forestplot(trace, var_names=['group1_mean', 'group2_mean']); pm.forestplot(trace, var_names=['group1_std', 'group2_std', 'ν_minus_one']); pm.summary(trace, varnames=['difference of means', 'difference of stds', 'effect size']) ``` ## References 1. Goodman SN. Toward evidence-based medical statistics. 1: The P value fallacy. Annals of Internal Medicine. 1999;130(12):995-1004. doi:10.7326/0003-4819-130-12-199906150-00008. 2. Johnson D. The insignificance of statistical significance testing. Journal of Wildlife Management. 1999;63(3):763-772. 3. Kruschke JK. Bayesian estimation supersedes the t test. J Exp Psychol Gen. 
2013;142(2):573-603. doi:10.1037/a0029146. The original pymc2 implementation was written by Andrew Straw and can be found here: https://github.com/strawlab/best Ported to PyMC3 by [Thomas Wiecki](https://twitter.com/twiecki) (c) 2015, updated by Chris Fonnesbeck.
github_jupyter
# MSTICpy - Data Uploaders ### Description This notebook provides a guided example of using the Log Analytics and Splunk Data Uploader included with MSTICpy.<br><br> Contents: - How to instanciate Uploaders - Uploading DataFrames - Uploading Files - Uploading Folders ``` #Setup from msticpy.nbtools import nbinit extra_imports = ["msticpy.data.uploaders.splunk_uploader, SplunkUploader", "msticpy.data.uploaders.loganalytics_uploader, LAUploader"] nbinit.init_notebook( namespace=globals(), extra_imports=extra_imports, ); WIDGET_DEFAULTS = { "layout": widgets.Layout(width="95%"), "style": {"description_width": "initial"}, } # Load some sample data df = pd.read_csv('https://raw.githubusercontent.com/microsoft/msticpy/master/tests/testdata/az_net_flows.csv', parse_dates=['TimeGenerated']) df.head(2) ``` ## LogAnalytics Data Uploader Below we collect some details required for our uploader, instanciate our LogAnalytics data uploader and pass our DataFrame loaded above to be uploaded. We are setting the debug flag on our uploader so we can get some additional details on our upload progress. ``` la_ws_id = widgets.Text(description='Workspace ID:') la_ws_key = widgets.Password(description='Workspace Key:') display(la_ws_id) display(la_ws_key) # Instanciate our Uploader la_up = LAUploader(workspace=la_ws_id.value, workspace_secret=la_ws_key.value, debug=True) # Upload our DataFrame la_up.upload_df(data=df, table_name='upload_demo') ``` ### Upload a file We can now upload a file to our Workspace using the same Uploader. We simply pass the path to the file we want to upload, and we can also pass a table name for the data to be uploaded to. ``` la_up.upload_file(file_path='data/alertlist.csv', table_name='upload_demo') ``` ### Upload a folder We can now upload a file to our Workspace using the same Uploader. We simply pass the the path to the folder we want to upload file from. 
In this case we aren't going to pass a table name, in which case the name will be generated automatically for each file from the file's name. With a folder we get a progress bar showing the progress uploading each file. ``` la_up.upload_folder(folder_path='data/') ``` ## Splunk Data Uploader The Splunk Uploader functions in the same manner as the LogAnalytics one. <br> Below we collect some details required for our uploader, instanciate our Splunk data uploader and pass our DataFrame loaded above to be uploaded. We are setting the debug flag on our uploader so we can get some additional details on our upload progress.<br> When uploading our DataFrame the only difference is that as well as providing a table name (which is represneted as sourcetype in Splunk), we also need to pass a Splunk index that we want to data uploaded to. Also as Splunk uploads data a line at a time we get a progress bar for the file as it uploads. ``` sp_host = widgets.Text(description='Splunk host') sp_user = widgets.Text(description='Username') sp_pwrd = widgets.Password(description='Password') display(sp_host) display(sp_user) display(sp_pwrd) # Instanciate our Uploader spup = SplunkUploader(username=sp_user.value, host=sp_host.value, password=sp_pwrd.value, debug=True) # Upload our DataFrame spup.upload_df(data=df, table_name='upload_test', index_name='upload_test') ``` ### Upload a file We can now upload a file to our Workspace using the same Uploader. We simply pass the path to the file we want to upload along with the index name, and we can also pass a table name for the data to be uploaded to. ``` spup.upload_file(file_path='data/alertlist.csv', index_name='upload_demo', table_name='upload_demo') ``` ### Upload a folder We can now upload a file to our Workspace using the same Uploader. We simply pass the the path to the folder we want to upload file from. 
In this case we aren't going to pass a table name, in which case the name will be generated automatically for each file from the file's name; however, we still need to pass an index name. ``` spup.upload_folder(folder_path='data/', index_name='upload_demo') ```
github_jupyter
# Vocabulary Tree for Image Descriptors with Binary Descriptors ## Introduction This notebook describes how to implement a scalable image search with a vocabulary tree. The work is largely based on the article: **Scalable Recognition with a vocabulary tree**, David Nister and Henrik Stewenius link to paper: http://www-inst.eecs.berkeley.edu/~cs294-6/fa06/papers/nister_stewenius_cvpr2006.pdf However some innovations were applied: * Use of **ORB** as image descriptors instead of patented **SIFT** described in the article * Use of **hamming distance** as distance measure among descriptors * Use of **bit-wise average** as opposed to numerical average (since ORB is binary and not numerical as SIFT) * **Flexible clustering algorithm** that makes use of a custom distance measure and average as above to work with both binary and numerical image descriptors Queries against a small dataset appear to give good results with this approach. So far, this notebook provides relatively simple Python code that implements this algorithm ## Solution The algorithm is composed of several steps expressed as functions in the next subsections. * tree data structure * flexible clustering * tree assembly * tree weight update * tree visit * score calculation * image database visit vector * helper functions to read image database and query image descriptors The following section describes a simple test.
``` # pip install opencv-python # pip install opencv-contrib-python import cv2 import numpy as np import os import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec ``` ### Node Data Structure ``` class Node: _nodes = [] # list of all created nodes _total_images = 0 # total number of images def __init__(self, root=False): self._children = {} # dictionary key(img descriptor) -> value self._images = set() # set of image ids if not root: self._index = len(Node._nodes) Node._nodes.append(self) # append node to global list of nodes @property def children(self): return self._children @children.setter def children(self, children): self._children = children @property def images(self): return self._images @images.setter def images(self, images): self._images = images @property def weight(self): images_in_node = len(self._images) if images_in_node > 0: return np.log(Node._total_images/len(self._images)) else: return 0 @property def index(self): return self._index @staticmethod def set_total_images(total_images): Node._total_images = total_images @staticmethod def nodes(): return Node._nodes @staticmethod def init(): Node._nodes = [] def tree_traversal(node): print('node.images='+str(node.images) + ' node.children='+str(len(node.children))) for child_id in node.children: print('node.descriptor='+str(hash(child_id))) tree_traversal(node.children[child_id]) ``` ### Configurable Clustering Algorithms Based on K-Means ``` # clustering image descriptors def clustering(data, k, dissimilarity, average, stop_criteria=0.1, attempts=3): centroids = _choose_initial_centroids(data, k) centroid_labels = None diff = 100000 while diff > stop_criteria: centroid_labels = _find_nearest_centroid(data, centroids, dissimilarity) new_centroids = _calculate_new_centroids(data, centroid_labels, k, average, centroids) diff = _average_centroids_move(centroids, new_centroids) centroids = new_centroids return centroids, centroid_labels def _choose_initial_centroids(data, k): centroid_idxs 
= np.random.randint(data.shape[0], size=k) return data[centroid_idxs] def _find_nearest_centroid(data, centroids, dissimilarity): centroid_labels = {} for idx in range(len(centroids)): centroid_labels[idx] = [] for item_idx, item in enumerate(data): min_dist = None centroid_id = None for centroid_idx, c in enumerate(centroids): distance = dissimilarity(item, c) if min_dist == None or distance < min_dist: min_dist = distance centroid_id = centroid_idx centroid_labels[centroid_id].append(item_idx) return centroid_labels def _calculate_new_centroids(data, centroid_labels, k, average, original_centroids): centroids = [] for centroid_idx in centroid_labels: if len(centroid_labels[centroid_idx]) == 0: new_centroid = original_centroids[centroid_idx] else: new_centroid = average(data[centroid_labels[centroid_idx]]) centroids.append(new_centroid) return np.array(centroids) def _average_centroids_move(centroids, new_centroids): return np.sum(np.abs(centroids - new_centroids)) ``` ### Assembly Image Descriptor Tree ``` def assembly_tree(descriptors, k, dissimilarity, average, level): level -= 1 if level < 0: #print('deepest level: stop criteria') return {} elif len(descriptors) == 1: #print('single descriptor: stop criteria') key = descriptors[0].tobytes() return { key: Node() } centroids, centroid_labels = clustering(descriptors, k, dissimilarity, average) children = {} for centroid_id, centroid in enumerate(centroids): if len(centroid_labels[centroid_id]) > 0: node = Node() centroid_descriptors = np.array(descriptors[centroid_labels[centroid_id]]) node.children = assembly_tree(centroid_descriptors, k, dissimilarity, average, level) children[centroid.tobytes()] = node return children ``` ### Updating Tree Weights ``` def update_weights(node, img_idx, arr_descriptors, dissimilarity): for descriptor in arr_descriptors: _update_weights_with_descriptor(node, img_idx, descriptor, dissimilarity) # descriptor - query descriptor # arr_descriptors - array of descriptors # return: 
descritor in arr_descriptors close to descriptor def _nearest_descriptor(descriptor, arr_descriptors, dissimilarity): min_dist = None min_descriptor = None for descriptor_item in arr_descriptors: distance = dissimilarity(descriptor, descriptor_item) if min_dist == None or distance < min_dist: min_dist = distance min_descriptor = descriptor_item return min_descriptor def _convert_to_img_descriptor(descriptor_bytes): return np.frombuffer(descriptor_bytes, dtype=np.uint8) def _update_weights_with_descriptor(node, img_idx, descriptor, dissimilarity): if len(node.children) > 0: arr_descriptors = list(map(_convert_to_img_descriptor, node.children.keys())) nearest_descriptor = _nearest_descriptor(descriptor, arr_descriptors, dissimilarity) child_node = node.children[nearest_descriptor.tobytes()] child_node.images.add(img_idx) _update_weights_with_descriptor(child_node, img_idx, descriptor, dissimilarity) ``` ### Node Visiting ``` def visit_tree(root, descriptors, dissimilarity, with_weight=False): visit_path = np.zeros(len(Node.nodes())) for descriptor in descriptors: _descriptor_visit_tree(root, descriptor, dissimilarity, visit_path) for idx, visit in enumerate(visit_path): visit_path[idx] /= len(descriptors) if with_weight: for idx, visit in enumerate(visit_path): visit_path[idx] *= Node.nodes()[idx].weight return visit_path def _descriptor_visit_tree(node, descriptor, dissimilarity, visit_path): if node.children == {}: return visit_path arr_descriptors = list(map(_convert_to_img_descriptor, node.children.keys())) nearest_descriptor = _nearest_descriptor(descriptor, arr_descriptors, dissimilarity) child_node = node.children[nearest_descriptor.tobytes()] visit_path[child_node.index] += 1 return _descriptor_visit_tree(child_node, descriptor, dissimilarity, visit_path) ``` ### Score Calculation ``` # query_vector = vector of visits multiplied by weights from query image descriptors # dbimg_vector = vector of visits multiplied by weights from database image descriptors def 
score_calculation(query_vector, dbimg_vector): norm_query_vector = query_vector / (np.sqrt(np.sum(np.power(query_vector, 2)))) norm_dbimg_vector = dbimg_vector / (np.sqrt(np.sum(np.power(query_vector, 2)))) diff = np.abs(norm_query_vector - norm_dbimg_vector) return np.sqrt(np.sum(np.power(diff, 2))) ``` ### Create Matrix for Image Database Visit Vector ``` # root: tree root node # # cvmat_images: list of B&W cvMat images read from cv2.imread # # dissimilarity: dissimilarity function to compare 2 image descriptors # # with_weight: (T/F) should multiply with node weight or just number of visits per node # # descr_extractor: image descriptor extractor (as default ORB) # def dbimg_visit_tree(root, cvmat_images, dissimilarity, with_weight: bool, descr_extractor): # list of visit per database image dbimg_vectors = [] for img in cvmat_images: keypoints, descriptors = descr_extractor.detectAndCompute(img, None) if descriptors is None: descriptors = [] # create visit vector from image and add to list img_vector = visit_tree(root, descriptors, dissimilarity, with_weight) dbimg_vectors.append(img_vector) return np.array(dbimg_vectors) ``` ### Helper Function to Read Image Files ``` def read_images(filenames, black_white=True): images = [] for filename in filenames: images.append(read_image(filename, black_white)) return np.array(images) def read_image(filename, black_white=True): if black_white: image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) else: image = cv2.imread(filaneme, cv2.IMREAD_COLOR) return image def image_descriptors_map(images, descr_extractor): img_descriptors = {} for idx, image in enumerate(images): # extract image descriptors keypoints, descriptors = descr_extractor.detectAndCompute(image, None) img_descriptors[idx] = [] if descriptors is not None: for descriptor in descriptors: img_descriptors[idx].append(descriptor) img_descriptors[idx] = np.array(img_descriptors[idx], dtype=np.uint8) return img_descriptors def image_descriptors(image_descriptors_map): 
img_descriptors = [] for img_key in image_descriptors_map: img_descriptors.extend(image_descriptors_map[img_key]) return np.array(img_descriptors, dtype=np.uint8) def image_descriptors_from_file(filename, descr_extractor): img = read_image(filename) keypoints, descriptors = descr_extractor.detectAndCompute(img, None) if descriptors is None: descriptors = np.array([], dtype=np.uint8) return descriptors ``` ## Helper Function for Brute Force Descriptor Matching ``` def brute_force_image_descriptors(query_img_descriptors, image_descriptors_map, filenames): bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) distances = {} for img_key in image_descriptors_map: img_descriptors = image_descriptors_map[img_key] if img_descriptors.shape[0] > 0 and query_img_descriptors.shape[0] > 0: matches = bf.match(query_img_descriptors, img_descriptors) dist = [item.distance for item in matches] dist = sorted(dist) dist = np.mean(dist) distances[dist] = filenames[img_key] else: print("img_descriptors empty for image = " + str(img_key) + " set a high distance!") distances[10000] = filenames[img_key] return distances ``` ## Tests ``` filenames = list([ './imgdb/' + filename for filename in os.listdir('./imgdb/') ]) print('database filenames') print('==================') print(str(filenames)) print() query_files = list([ './query/' + filename for filename in os.listdir('./query/') ]) print('query filenames') print('===============') print(str(query_files)) ``` ### Test1: ORB / hamming distance / bit-wise average Next block below defines the parameters for this experiments. 
* ORB as image descriptor
* Hamming as distance measure
* bit-wise as average strategy
* 3 clusters (this also means that each vocabulary tree node will have 3 children; see `clusters` below)
* 6 max vertical levels for the tree (see `max_level` below)

```
# pre-condition: uint8 arrays - hamming distance
def orb_dissimilarity(elem1, elem2):
    """Hamming distance between two binary (uint8) ORB descriptors."""
    return cv2.norm(elem1, elem2, cv2.NORM_HAMMING)

# pre-condition: uint8 arrays - bit-wise average
def orb_average(data):
    """Bit-wise average of a stack of binary descriptors.

    Unpacks each uint8 descriptor to bits, averages per bit position, rounds
    to 0/1 and re-packs, yielding a single "centroid" binary descriptor.
    """
    bit_array = np.unpackbits(data, axis=1)
    avg_array = np.mean(bit_array, axis=0)
    avg_array = np.round(avg_array).astype(np.uint8)
    avg_data = np.packbits(avg_array)
    return avg_data

# Set number of images (used for the tree's TF-IDF-style weighting — confirm in Node)
Node.set_total_images(len(filenames))

# Set image descriptor extractor (up to 750 ORB keypoints per image)
orb_extractor = cv2.ORB_create(750)

# Number of clusters (branching factor of the vocabulary tree)
clusters = 3

# Tree max level
max_level = 6

images = read_images(filenames, black_white=True)
print('number of images='+str(images.shape[0]))

# Extract descriptors per database image, then flatten them for clustering.
db_descriptors_map = image_descriptors_map(images, descr_extractor=orb_extractor)
db_descriptors = image_descriptors(db_descriptors_map)
print('number of descriptors='+str(db_descriptors.shape[0]))

# Build the vocabulary tree over all database descriptors.
Node.init()
root = Node(root=True)
root.children = assembly_tree(descriptors=db_descriptors, k=clusters, dissimilarity=orb_dissimilarity, average=orb_average, level=max_level)
print('number of nodes:' + str(len(Node.nodes())))

# Accumulate per-node weights from every database image.
for imgkey in db_descriptors_map:
    print("update weights for image = " + str(imgkey))
    update_weights(root, imgkey, db_descriptors_map[imgkey], dissimilarity=orb_dissimilarity)

# One weighted visit-vector per database image (rows align with `filenames`).
db_visit_matrix = dbimg_visit_tree(root, images, dissimilarity=orb_dissimilarity, with_weight=True, descr_extractor=orb_extractor)
print('database image vector size = '+str(db_visit_matrix.shape))

# Query each file against the tree and print the ranked results.
for idx_file, query_file in enumerate(query_files):
    print('----------------------------------------------')
    print('query_file='+str(query_file))
    print('----------------------------------------------')
    query_descriptors = image_descriptors_from_file(query_file, descr_extractor=orb_extractor)
    query_vector = visit_tree(root, descriptors=query_descriptors, dissimilarity=orb_dissimilarity, with_weight=True)
    scores = {}
    for idx, db_item_vector in enumerate(db_visit_matrix):
        # NOTE(review): score is the dict key — equal scores overwrite entries.
        scores[score_calculation(query_vector, db_item_vector)] = filenames[idx]
    voctree_results = []
    for score in sorted(scores.keys()):
        db_filename = scores[score]
        voctree_results.append(db_filename)
    # Brute-force descriptor matching as ground truth for comparison.
    ground_truth = brute_force_image_descriptors(query_descriptors, db_descriptors_map, filenames)
    ground_truth_results = []
    for score in sorted(ground_truth.keys()):
        ground_truth_filename = ground_truth[score]
        ground_truth_results.append(ground_truth_filename)
    for idx in range(len(scores)):
        print("voctree = " + str(voctree_results[idx]))
```

Each column in the image grid shows the results for each image query

```
# Grid layout: row 0 = query image, row 1 = column headers, remaining rows =
# ranked results; each query gets 3 columns (voctree / ground truth / spacer).
fig, axarr = plt.subplots(nrows = 2 + len(filenames), ncols = len(query_files)*3, figsize=(20,55))
for idx_file, query_file in enumerate(query_files):
    print('query_file='+str(query_file))
    query_descriptors = image_descriptors_from_file(query_file, descr_extractor=orb_extractor)
    query_vector = visit_tree(root, descriptors=query_descriptors, dissimilarity=orb_dissimilarity, with_weight=True)
    scores = {}
    for idx, db_item_vector in enumerate(db_visit_matrix):
        scores[score_calculation(query_vector, db_item_vector)] = filenames[idx]
    axarr[0, idx_file*3].imshow(read_image(query_file), cmap='gray')
    axarr[0, idx_file*3].set_title(query_file)
    axarr[0, idx_file*3].axis('off')
    axarr[0, idx_file*3 + 1].axis('off')
    axarr[0, idx_file*3 + 2].axis('off')
    axarr[1, idx_file*3].axis('off')
    axarr[1, idx_file*3 + 1].axis('off')
    axarr[1, idx_file*3 + 2].axis('off')
    axarr[1, idx_file*3].set_title('voctree')
    axarr[1, idx_file*3 + 1].set_title('ground truth')
    # Vocabulary-tree results, best score first.
    idx_db = 2
    for score in sorted(scores.keys()):
        db_filename = scores[score]
        axarr[idx_db, idx_file*3].imshow(read_image(db_filename), cmap='gray')
        axarr[idx_db, idx_file*3].set_title(round(score, 2))
        axarr[idx_db, idx_file*3].set_xticklabels([])
        axarr[idx_db, idx_file*3].set_yticklabels([])
        idx_db += 1
    # Brute-force ground-truth results in the adjacent column.
    ground_truth = brute_force_image_descriptors(query_descriptors, db_descriptors_map, filenames)
    idx_db = 2
    for score in sorted(ground_truth.keys()):
        ground_truth_filename = ground_truth[score]
        axarr[idx_db, idx_file*3 + 1].imshow(read_image(ground_truth_filename), cmap='gray')
        axarr[idx_db, idx_file*3 + 1].set_title(round(score,2))
        axarr[idx_db, idx_file*3 + 1].set_xticklabels([])
        axarr[idx_db, idx_file*3 + 1].set_yticklabels([])
        axarr[idx_db, idx_file*3 + 2].axis('off')
        idx_db += 1
plt.show()
```

### Test 1 Conclusions

The vocabulary tree seems to have worked as expected.

* when searched with a database image (fruits-4) the nearest result had distance 0, as expected, since there is an identical image in the database
* when searched with a very similar image (book) the expected answer (book-1) was returned
* when searched with semantically similar images there were good results for (fruits) and (helicopter) but not so good for (shoes).

Perhaps using a ground truth built by actually comparing each descriptor from the database with the query could help elucidate whether these results with similar images are happening due to a limitation in the image descriptor itself.
github_jupyter
# Machine Learning Machine learning is the application of algorithms to extract information from datasets by way of understanding it. This "understanding" usually means fitting a model on the dataset. It overlaps considerably with data mining, where one is usually more concerned with getting the information than with the modeling aspect. It also overlaps with artificial inteligence, mathematical optimization and inferential statistics. Few are experts on machine learning and even fewer can find the best model to match a certain dataset. But ML is becoming so ubiquitous that even school kids need to learn it. From the average person point of view, as long as the validation tests show a good fit, any model is good enough, so the question usually shifts from modeling to easy implementation and good validation procedure. On Python there are several libraries that also stand out, and this is a personal list: - Scikit-learn: considered the best overall, is friendly to newcommers and contains good validation support. - mlpy: competition. - PyBrain: AI and neural networks. - nltk: for natural language processing and text mining. - Theano + Pylearn2: uses the graphical processor, for fast and "deep" learning. - MDP (Modular toolkit for Data Processing): make workflows using scikit-learn and other libs. - Orange: visual framework for ML (similar to what Weka is in Java). Has a bioinformatics plugin. ### [Scikit-learn](http://scikit-learn.org/stable/) [sklearn]:http://1.bp.blogspot.com/-ME24ePzpzIM/UQLWTwurfXI/AAAAAAAAANw/W3EETIroA80/s640/drop_shadows_background.png ![sklearn-outline](http://scikit-learn.org/stable/_static/ml_map.png) This figure was made by the creator of Scikit-learn. While the methods described in it matter less, the distinction between the covered problem classes is more important. 
You also have to keep in mind that this is only the simple core of ML, and there are entire classes of algorithms that are either not covered by scikit-learn (such as genetic algorithms or most neural networks) or covered in too small detail (bayesian learning). For these there are other specific Python libraries, aditionally a certain class of algorithms may only be available on a certain program or language, and bindings are usually provided for Python. Another classification of ML problems is perhaps even more useful, and hopefully funnier, as it not only separates problem classes but creates social classes among programmers: - Supervised learning: There is a target that we are trying to predict. Datasets for supervised learning methods specifically have several tested outcomes, and the model is using them to fit in. Regression and classification methods require a target. Example: Measured omics datasets with measured phenotypes, and good controls. - Unsupervised learning: No outcome is available, and the typical workflow consists in clustering, visualization and dimensionality reduction (feature selection). Example: "So like I have this gene expression dataset ...", most astronomical measurements, etc. Coincidentally this is what unaccomplished yet highly promising ML experts get to spend most of their time on (because supervised learning is "easy"). - Reinforcement learning: A model is trained on incomplete data, and new data is added while the model improves. Here you find most of the cool sounding algorithms in ML, such the best of neural networks, markov chain monte carlo, bayesian training, etc. Most experts in RL are hired by the financial sector to work on big data (and many use python) for expensive fees. In hardcore science where things are never fully known or fully measured this class gets all the media frenzy. Robotics and gaming are also big players here. 
- Ensemble learning: Using different ML algorithms on the same problem and improving a model based on all their outcomes. Decision trees and random forests find application here. Bioinformatics usually relies more and more on consensus methods, if for no other reason but to relax angry reviewers. Some observations: - Other learning classes exist, like the structural learning, representation learning or metric learning. They usually focus on different aspects of the dataset such as finding a good representation of the inputs or finding associations among variables. - Many learning algorithms can perform okay on several problem classes. Also, the methodologies can come from different sources, such as statistics, optimization, AI or simple heuristics such as genetic programming or swarm intelligence. Task: - Take some time to explore the [documentation](http://scikit-learn.org/stable/user_guide.html) provided on scikit-learn for one or two methods of your choosing.
github_jupyter
# Example Object-Oriented Access to the PEST Control File The `pst_handler` module with `pyemu.pst` contains the `Pst` class for dealing with pest control files. It relies heavily on `pandas` to deal with tabular sections, such as parameters, observations, and prior information. This jupyter notebook shows how to create a control-file object (instantiate the class or make an instance of the class), how to access attributes of the class, and how to call an instance method. ``` import os import numpy as np import pyemu from pyemu import Pst ``` A PEST control file is required to make the object, and we need to pass the name of the PEST control file as a parameter to the __init__ method for the class. The class instance (or object) is assigned to the variable *p*. ``` pst_name = os.path.join("..", "..", "examples", "henry","pest.pst") p = Pst(pst_name) ``` Now all of the relevant parts of the pest control file are attributes of the object. For example, the parameter_data, observation data, and prior information are available as pandas dataframes. ``` p.parameter_data.head() p.observation_data.head() p.prior_information.head() ``` The client-code can be used to change values in the dataframes that can be written to a new or updated control file using the `write()` method as shown at the end of the notebook. ``` p.parameter_data.loc['global_k', 'parval1'] = 225 p.parameter_data.head() ``` A residual file (`.rei` or `res`) can also be passed to the `resfile` argument at instantiation to enable some simple residual analysis and evaluate if weight adjustments are needed. If `resfile = False`, or not supplied, and if the residual file is in the same directory as the pest control file and has the same base name, it will be accessed automatically: ``` p.res.head() ``` The weights can be updated by changing values in the observation dataframe. 
``` p.observation_data.loc['h_obs01_1', 'weight'] = 25.0 p.observation_data.head() ``` The `Pst` class exposes a method, `get()`, to create a new `Pst` instance with a subset of parameters and or observations. For example, make a new PEST control-file object using the first 10 entries from the parameter and observation dataframes. Note this method does not propogate prior information to the new instance: ``` pnew = p.get(p.par_names[:10],p.obs_names[:10]) pnew.prior_information.head() ``` Check the parameter_data and observation_data dataframes for the new object, note that the updated values for `global_k` `parval1` and `h_obs01_1` `weight` are in these dataframes. ``` pnew.parameter_data.head() pnew.observation_data.head() ``` The `write(filename)` method allows you to write a PEST control file with the current state of the object: that is, make a new PEST control file with the current information contained in an object. ``` pnew.write("test.pst") ```
github_jupyter
# [ATM 623: Climate Modeling](../index.ipynb) [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany # Lecture 7: Elementary greenhouse models ## Warning: content out of date and not maintained You really should be looking at [The Climate Laboratory book](https://brian-rose.github.io/ClimateLaboratoryBook) by Brian Rose, where all the same content (and more!) is kept up to date. ***Here you are likely to find broken links and broken code.*** ### About these notes: This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways: - The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware - The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb) - A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html). [Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html). Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab ``` # Ensure compatibility with Python 2 and 3 from __future__ import print_function, division ``` ## Contents 1. [A single layer atmosphere](#section1) 2. [Introducing the two-layer grey gas model](#section2) 3. [Tuning the grey gas model to observations](#section3) 4. [Level of emission](#section4) 5. [Radiative forcing in the 2-layer grey gas model](#section5) 6. [Radiative equilibrium in the 2-layer grey gas model](#section6) 7. [Summary](#section7) ____________ <a id='section1'></a> ## 1. 
A single layer atmosphere ____________ We will make our first attempt at quantifying the greenhouse effect in the simplest possible greenhouse model: a single layer of atmosphere that is able to absorb and emit longwave radiation. <img src='../images/1layerAtm_sketch.png'> ### Assumptions - Atmosphere is a single layer of air at temperature $T_a$ - Atmosphere is **completely transparent to shortwave** solar radiation. - The **surface** absorbs shortwave radiation $(1-\alpha) Q$ - Atmosphere is **completely opaque to infrared** radiation - Both surface and atmosphere emit radiation as **blackbodies** ($\sigma T_s^4, \sigma T_a^4$) - Atmosphere radiates **equally up and down** ($\sigma T_a^4$) - There are no other heat transfer mechanisms We can now use the concept of energy balance to ask what the temperature need to be in order to balance the energy budgets at the surface and the atmosphere, i.e. the **radiative equilibrium temperatures**. ### Energy balance at the surface \begin{align} \text{energy in} &= \text{energy out} \\ (1-\alpha) Q + \sigma T_a^4 &= \sigma T_s^4 \\ \end{align} The presence of the atmosphere above means there is an additional source term: downwelling infrared radiation from the atmosphere. We call this the **back radiation**. ### Energy balance for the atmosphere \begin{align} \text{energy in} &= \text{energy out} \\ \sigma T_s^4 &= A\uparrow + A\downarrow = 2 \sigma T_a^4 \\ \end{align} which means that $$ T_s = 2^\frac{1}{4} T_a \approx 1.2 T_a $$ So we have just determined that, in order to have a purely **radiative equilibrium**, we must have $T_s > T_a$. 
*The surface must be warmer than the atmosphere.* ### Solve for the radiative equilibrium surface temperature Now plug this into the surface equation to find $$ \frac{1}{2} \sigma T_s^4 = (1-\alpha) Q $$ and use the definition of the emission temperature $T_e$ to write $$ (1-\alpha) Q = \sigma T_e^4 $$ *In fact, in this model, $T_e$ is identical to the atmospheric temperature $T_a$, since all the OLR originates from this layer.* Solve for the surface temperature: $$ T_s = 2^\frac{1}{4} T_e $$ Putting in observed numbers, $T_e = 255$ K gives a surface temperature of $$T_s = 303 ~\text{K}$$ This model is one small step closer to reality: surface is warmer than atmosphere, emissions to space generated in the atmosphere, atmosphere heated from below and helping to keep surface warm. BUT our model now overpredicts the surface temperature by about 15ºC (or K). Ideas about why? Basically we just need to read our **list of assumptions** above and realize that none of them are very good approximations: - Atmosphere absorbs some solar radiation. - Atmosphere is NOT a perfect absorber of longwave radiation - Absorption and emission varies strongly with wavelength *(atmosphere does not behave like a blackbody)*. - Emissions are not determined by a single temperature $T_a$ but by the detailed *vertical profile* of air temperture. - Energy is redistributed in the vertical by a variety of dynamical transport mechanisms (e.g. convection and boundary layer turbulence). ____________ <a id='section2'></a> ## 2. Introducing the two-layer grey gas model ____________ Let's generalize the above model just a little bit to build a slighly more realistic model of longwave radiative transfer. We will address two shortcomings of our single-layer model: 1. No vertical structure 2. 100% longwave opacity Relaxing these two assumptions gives us what turns out to be a very useful prototype model for **understanding how the greenhouse effect works**. 
### Assumptions - The atmosphere is **transparent to shortwave radiation** (still) - Divide the atmosphere up into **two layers of equal mass** (the dividing line is thus at 500 hPa pressure level) - Each layer **absorbs only a fraction $\epsilon$ ** of whatever longwave radiation is incident upon it. - We will call the fraction $\epsilon$ the **absorptivity** of the layer. - Assume $\epsilon$ is the same in each layer This is called the **grey gas** model, where grey here means the emission and absorption have no spectral dependence. We can think of this model informally as a "leaky greenhouse". Note that the assumption that $\epsilon$ is the same in each layer is appropriate if the absorption is actually carried out by a gas that is **well-mixed** in the atmosphere. Out of our two most important absorbers: - CO$_2$ is well mixed - H$_2$O is not (mostly confined to lower troposphere due to strong temperature dependence of the saturation vapor pressure). But we will ignore this aspect of reality for now. In order to build our model, we need to introduce one additional piece of physics known as **Kirchoff's Law**: $$ \text{absorptivity} = \text{emissivity} $$ So if a layer of atmosphere at temperature $T$ absorbs a fraction $\epsilon$ of incident longwave radiation, it must emit $$ \epsilon ~\sigma ~T^4 $$ both up and down. ### A sketch of the radiative fluxes in the 2-layer atmosphere <img src='../images/2layerAtm_sketch.png'> - Surface temperature is $T_s$ - Atm. temperatures are $T_0, T_1$ where $T_0$ is closest to the surface. 
- absorptivity of atm layers is $\epsilon$ - Surface emission is $\sigma T_s^4$ - Atm emission is $\epsilon \sigma T_0^4, \epsilon \sigma T_1^4$ (up and down) - Absorptivity = emissivity for atmospheric layers - a fraction $(1-\epsilon)$ of the longwave beam is **transmitted** through each layer ### A fun aside: symbolic math with the `sympy` package This two-layer grey gas model is simple enough that we can work out all the details algebraically. There are three temperatures to keep track of $(T_s, T_0, T_1)$, so we will have 3x3 matrix equations. We all know how to work these things out with pencil and paper. But it can be tedious and error-prone. Symbolic math software lets us use the computer to automate a lot of tedious algebra. The [sympy](http://www.sympy.org/en/index.html) package is a powerful open-source symbolic math library that is well-integrated into the scientific Python ecosystem. ``` import sympy # Allow sympy to produce nice looking equations as output sympy.init_printing() # Define some symbols for mathematical quantities # Assume all quantities are positive (which will help simplify some expressions) epsilon, T_e, T_s, T_0, T_1, sigma = \ sympy.symbols('epsilon, T_e, T_s, T_0, T_1, sigma', positive=True) # So far we have just defined some symbols, e.g. T_s # We have hard-coded the assumption that the temperature is positive sympy.ask(T_s>0) ``` ### Longwave emissions Let's denote the emissions from each layer as \begin{align} E_s &= \sigma T_s^4 \\ E_0 &= \epsilon \sigma T_0^4 \\ E_1 &= \epsilon \sigma T_1^4 \end{align} recognizing that $E_0$ and $E_1$ contribute to **both** the upwelling and downwelling beams. 
``` # Define these operations as sympy symbols # And display as a column vector: E_s = sigma*T_s**4 E_0 = epsilon*sigma*T_0**4 E_1 = epsilon*sigma*T_1**4 E = sympy.Matrix([E_s, E_0, E_1]) E ``` ### Shortwave radiation Since we have assumed the atmosphere is transparent to shortwave, the incident beam $Q$ passes unchanged from the top to the surface, where a fraction $\alpha$ is reflected upward out to space. ``` # Define some new symbols for shortwave radiation Q, alpha = sympy.symbols('Q, alpha', positive=True) # Create a dictionary to hold our numerical values tuned = {} tuned[Q] = 341.3 # global mean insolation in W/m2 tuned[alpha] = 101.9/Q.subs(tuned) # observed planetary albedo tuned[sigma] = 5.67E-8 # Stefan-Boltzmann constant in W/m2/K4 tuned # Numerical value for emission temperature #T_e.subs(tuned) ``` ### Upwelling beam Let $U$ be the upwelling flux of longwave radiation. The upward flux from the surface to layer 0 is $$ U_0 = E_s $$ (just the emission from the suface). ``` U_0 = E_s U_0 ``` Following this beam upward, we can write the upward flux from layer 0 to layer 1 as the sum of the transmitted component that originated below layer 0 and the new emissions from layer 0: $$ U_1 = (1-\epsilon) U_0 + E_0 $$ ``` U_1 = (1-epsilon)*U_0 + E_0 U_1 ``` Continuing to follow the same beam, the upwelling flux above layer 1 is $$ U_2 = (1-\epsilon) U_1 + E_1 $$ ``` U_2 = (1-epsilon) * U_1 + E_1 ``` Since there is no more atmosphere above layer 1, this upwelling flux is our Outgoing Longwave Radiation for this model: $$ OLR = U_2 $$ ``` U_2 ``` The three terms in the above expression represent the **contributions to the total OLR that originate from each of the three levels**. 
Let's code this up explicitly for future reference: ``` # Define the contributions to OLR originating from each level OLR_s = (1-epsilon)**2 *sigma*T_s**4 OLR_0 = epsilon*(1-epsilon)*sigma*T_0**4 OLR_1 = epsilon*sigma*T_1**4 OLR = OLR_s + OLR_0 + OLR_1 print( 'The expression for OLR is') OLR ``` ### Downwelling beam Let $D$ be the downwelling longwave beam. Since there is no longwave radiation coming in from space, we begin with ``` fromspace = 0 D_2 = fromspace ``` Between layer 1 and layer 0 the beam contains emissions from layer 1: $$ D_1 = (1-\epsilon)D_2 + E_1 = E_1 $$ ``` D_1 = (1-epsilon)*D_2 + E_1 D_1 ``` Finally between layer 0 and the surface the beam contains a transmitted component and the emissions from layer 0: $$ D_0 = (1-\epsilon) D_1 + E_0 = \epsilon(1-\epsilon) \sigma T_1^4 + \epsilon \sigma T_0^4$$ ``` D_0 = (1-epsilon)*D_1 + E_0 D_0 ``` This $D_0$ is what we call the **back radiation**, i.e. the longwave radiation from the atmosphere to the surface. ____________ <a id='section3'></a> ## 3. Tuning the grey gas model to observations ____________ In building our new model we have introduced exactly one parameter, the absorptivity $\epsilon$. We need to choose a value for $\epsilon$. We will tune our model so that it **reproduces the observed global mean OLR** given **observed global mean temperatures**. To get appropriate temperatures for $T_s, T_0, T_1$, let's revisit the [global, annual mean lapse rate plot from NCEP Reanalysis data](Lecture06 -- Radiation.ipynb) from the previous lecture. ### Temperatures First, we set $$T_s = 288 \text{ K} $$ From the lapse rate plot, an average temperature for the layer between 1000 and 500 hPa is $$ T_0 = 275 \text{ K}$$ Defining an average temperature for the layer between 500 and 0 hPa is more ambiguous because of the lapse rate reversal at the tropopause. We will choose $$ T_1 = 230 \text{ K}$$ From the graph, this is approximately the observed global mean temperature at 275 hPa or about 10 km. 
``` # add to our dictionary of values: tuned[T_s] = 288. tuned[T_0] = 275. tuned[T_1] = 230. tuned ``` ### OLR From the [observed global energy budget](Lecture01 -- Planetary energy budget.ipynb) we set $$ OLR = 238.5 \text{ W m}^{-2} $$ ### Solving for $\epsilon$ We wrote down the expression for OLR as a function of temperatures and absorptivity in our model above. We just need to equate this to the observed value and solve a **quadratic equation** for $\epsilon$. This is where the real power of the symbolic math toolkit comes in. Subsitute in the numerical values we are interested in: ``` # the .subs() method for a sympy symbol means # substitute values in the expression using the supplied dictionary # Here we use observed values of Ts, T0, T1 OLR2 = OLR.subs(tuned) OLR2 ``` We have a quadratic equation for $\epsilon$. Now use the `sympy.solve` function to solve the quadratic: ``` # The sympy.solve method takes an expression equal to zero # So in this case we subtract the tuned value of OLR from our expression eps_solution = sympy.solve(OLR2 - 238.5, epsilon) eps_solution ``` There are two roots, but the second one is unphysical since we must have $0 < \epsilon < 1$. Just for fun, here is a simple of example of *filtering a list* using powerful Python *list comprehension* syntax: ``` # Give me only the roots that are between zero and 1! list_result = [eps for eps in eps_solution if 0<eps<1] print( list_result) # The result is a list with a single element. # We need to slice the list to get just the number: eps_tuned = list_result[0] print( eps_tuned) ``` We conclude that our tuned value is $$ \epsilon = 0.586 $$ This is the absorptivity that guarantees that our model reproduces the observed OLR given the observed tempertures. ``` tuned[epsilon] = eps_tuned tuned ``` ____________ <a id='section4'></a> ## 4. Level of emission ____________ Even in this very simple greenhouse model, there is **no single level** at which the OLR is generated. 
The three terms in our formula for OLR tell us the contributions from each level. ``` OLRterms = sympy.Matrix([OLR_s, OLR_0, OLR_1]) OLRterms ``` Now evaluate these expressions for our tuned temperature and absorptivity: ``` OLRtuned = OLRterms.subs(tuned) OLRtuned ``` So we are getting about 67 W m$^{-2}$ from the surface, 79 W m$^{-2}$ from layer 0, and 93 W m$^{-2}$ from the top layer. In terms of fractional contributions to the total OLR, we have (limiting the output to two decimal places): ``` sympy.N(OLRtuned / 239., 2) ``` Notice that the largest single contribution is coming from the top layer. This is in spite of the fact that the emissions from this layer are weak, because it is so cold. Comparing to observations, the actual contribution to OLR from the surface is about 22 W m$^{-2}$ (or about 9% of the total), not 67 W m$^{-2}$. So we certainly don't have all the details worked out yet! As we will see later, to really understand what sets that observed 22 W m$^{-2}$, we will need to start thinking about the spectral dependence of the longwave absorptivity. ____________ <a id='section5'></a> ## 5. Radiative forcing in the 2-layer grey gas model ____________ Adding some extra greenhouse absorbers will mean that a greater fraction of incident longwave radiation is absorbed in each layer. Thus **$\epsilon$ must increase** as we add greenhouse gases. Suppose we have $\epsilon$ initially, and the absorptivity increases to $\epsilon_2 = \epsilon + \delta_\epsilon$. Suppose further that this increase happens **abruptly** so that there is no time for the temperatures to respond to this change. **We hold the temperatures fixed** in the column and ask how the radiative fluxes change. **Do you expect the OLR to increase or decrease?** Let's use our two-layer leaky greenhouse model to investigate the answer. 
The components of the OLR before the perturbation are ``` OLRterms ``` After the perturbation we have ``` delta_epsilon = sympy.symbols('delta_epsilon') OLRterms_pert = OLRterms.subs(epsilon, epsilon+delta_epsilon) OLRterms_pert ``` Let's take the difference ``` deltaOLR = OLRterms_pert - OLRterms deltaOLR ``` To make things simpler, we will neglect the terms in $\delta_\epsilon^2$. This is perfectly reasonably because we are dealing with **small perturbations** where $\delta_\epsilon << \epsilon$. Telling `sympy` to set the quadratic terms to zero gives us ``` deltaOLR_linear = sympy.expand(deltaOLR).subs(delta_epsilon**2, 0) deltaOLR_linear ``` Recall that the three terms are the contributions to the OLR from the three different levels. In this case, the **changes** in those contributions after adding more absorbers. Now let's divide through by $\delta_\epsilon$ to get the normalized change in OLR per unit change in absorptivity: ``` deltaOLR_per_deltaepsilon = \ sympy.simplify(deltaOLR_linear / delta_epsilon) deltaOLR_per_deltaepsilon ``` Now look at the **sign** of each term. Recall that $0 < \epsilon < 1$. **Which terms in the OLR go up and which go down?** **THIS IS VERY IMPORTANT, SO STOP AND THINK ABOUT IT.** The contribution from the **surface** must **decrease**, while the contribution from the **top layer** must **increase**. **When we add absorbers, the average level of emission goes up!** ### "Radiative forcing" is the change in radiative flux at TOA after adding absorbers In this model, only the longwave flux can change, so we define the radiative forcing as $$ R = - \delta OLR $$ (with the minus sign so that $R$ is positive when the climate system is gaining extra energy). We just worked out that whenever we add some extra absorbers, the emissions to space (on average) will originate from higher levels in the atmosphere. What does this mean for OLR? Will it increase or decrease? 
To get the answer, we just have to sum up the three contributions we wrote above: ``` R = -sum(deltaOLR_per_deltaepsilon) R ``` Is this a positive or negative number? The key point is this: **It depends on the temperatures, i.e. on the lapse rate.** ### Greenhouse effect for an isothermal atmosphere Stop and think about this question: If the **surface and atmosphere are all at the same temperature**, does the OLR go up or down when $\epsilon$ increases (i.e. we add more absorbers)? Understanding this question is key to understanding how the greenhouse effect works. #### Let's solve the isothermal case We will just set $T_s = T_0 = T_1$ in the above expression for the radiative forcing. ``` R.subs([(T_0, T_s), (T_1, T_s)]) ``` which then simplifies to ``` sympy.simplify(R.subs([(T_0, T_s), (T_1, T_s)])) ``` #### The answer is zero For an isothermal atmosphere, there is **no change** in OLR when we add extra greenhouse absorbers. Hence, no radiative forcing and no greenhouse effect. Why? The level of emission still must go up. But since the temperature at the upper level is the **same** as everywhere else, the emissions are exactly the same. ### The radiative forcing (change in OLR) depends on the lapse rate! For a more realistic example of radiative forcing due to an increase in greenhouse absorbers, we can substitute in our tuned values for temperature and $\epsilon$. We'll express the answer in W m$^{-2}$ for a 1% increase in $\epsilon$. The three components of the OLR change are ``` deltaOLR_per_deltaepsilon.subs(tuned) * 0.01 ``` And the net radiative forcing is ``` R.subs(tuned) * 0.01 ``` So in our example, **the OLR decreases by 2.2 W m$^{-2}$**, or equivalently, the radiative forcing is +2.2 W m$^{-2}$. 
What we have just calculated is this: *Given the observed lapse rates, a small increase in absorbers will cause a small decrease in OLR.* The greenhouse effect thus gets stronger, and energy will begin to accumulate in the system -- which will eventually cause temperatures to increase as the system adjusts to a new equilibrium. ____________ <a id='section6'></a> ## 6. Radiative equilibrium in the 2-layer grey gas model ____________ In the previous section we: - made no assumptions about the processes that actually set the temperatures. - used the model to calculate radiative fluxes, **given observed temperatures**. - stressed the importance of knowing the lapse rates in order to know how an increase in emission level would affect the OLR, and thus determine the radiative forcing. A key question in climate dynamics is therefore this: **What sets the lapse rate?** It turns out that lots of different physical processes contribute to setting the lapse rate. Understanding how these processes acts together and how they change as the climate changes is one of the key reasons for which we need more complex climate models. For now, we will use our prototype greenhouse model to do the most basic lapse rate calculation: the **radiative equilibrium temperature**. We assume that - the only exchange of energy between layers is longwave radiation - equilibrium is achieved when the **net radiative flux convergence** in each layer is zero. 
### Compute the radiative flux convergence First, the **net upwelling flux** is just the difference between flux up and flux down: ``` # Upwelling and downwelling beams as matrices U = sympy.Matrix([U_0, U_1, U_2]) D = sympy.Matrix([D_0, D_1, D_2]) # Net flux, positive up F = U-D F ``` #### Net absorption is the flux convergence in each layer (difference between what's coming in the bottom and what's going out the top of each layer) ``` # define a vector of absorbed radiation -- same size as emissions A = E.copy() # absorbed radiation at surface A[0] = F[0] # Compute the convergence for n in range(2): A[n+1] = -(F[n+1]-F[n]) A ``` #### Radiative equilibrium means net absorption is ZERO in the atmosphere The only other heat source is the **shortwave heating** at the **surface**. In matrix form, here is the system of equations to be solved: ``` radeq = sympy.Equality(A, sympy.Matrix([(1-alpha)*Q, 0, 0])) radeq ``` Just as we did for the 1-layer model, it is helpful to rewrite this system using the definition of the **emission temperture** $T_e$ $$ (1-\alpha) Q = \sigma T_e^4 $$ ``` radeq2 = radeq.subs([((1-alpha)*Q, sigma*T_e**4)]) radeq2 ``` In this form we can see that we actually have a **linear system** of equations for a set of variables $T_s^4, T_0^4, T_1^4$. We can solve this matrix problem to get these as functions of $T_e^4$. ``` # Solve for radiative equilibrium fourthpower = sympy.solve(radeq2, [T_s**4, T_1**4, T_0**4]) fourthpower ``` This produces a dictionary of solutions for the fourth power of the temperatures! A little manipulation gets us the solutions for temperatures that we want: ``` # need the symbolic fourth root operation from sympy.simplify.simplify import nthroot fourthpower_list = [fourthpower[key] for key in [T_s**4, T_0**4, T_1**4]] solution = sympy.Matrix([nthroot(item,4) for item in fourthpower_list]) # Display result as matrix equation! 
T = sympy.Matrix([T_s, T_0, T_1]) sympy.Equality(T, solution) ``` In more familiar notation, the radiative equilibrium solution is thus \begin{align} T_s &= T_e \left( \frac{2+\epsilon}{2-\epsilon} \right)^{1/4} \\ T_0 &= T_e \left( \frac{1+\epsilon}{2-\epsilon} \right)^{1/4} \\ T_1 &= T_e \left( \frac{ 1}{2 - \epsilon} \right)^{1/4} \end{align} Plugging in the tuned value $\epsilon = 0.586$ gives ``` Tsolution = solution.subs(tuned) # Display result as matrix equation! sympy.Equality(T, Tsolution) ``` Now we just need to know the Earth's emission temperature $T_e$! (Which we already know is about 255 K) ``` # Here's how to calculate T_e from the observed values sympy.solve(((1-alpha)*Q - sigma*T_e**4).subs(tuned), T_e) # Need to unpack the list Te_value = sympy.solve(((1-alpha)*Q - sigma*T_e**4).subs(tuned), T_e)[0] Te_value ``` #### Now we finally get our solution for radiative equilibrium ``` # Output 4 significant digits Trad = sympy.N(Tsolution.subs([(T_e, Te_value)]), 4) sympy.Equality(T, Trad) ``` Compare these to the values we derived from the **observed lapse rates**: ``` sympy.Equality(T, T.subs(tuned)) ``` The **radiative equilibrium** solution is substantially **warmer at the surface** and **colder in the lower troposphere** than reality. This is a very general feature of radiative equilibrium, and we will see it again very soon in this course. ____________ <a id='section7'></a> ## 7. Summary ____________ ## Key physical lessons - Putting a **layer of longwave absorbers** above the surface keeps the **surface substantially warmer**, because of the **backradiation** from the atmosphere (greenhouse effect). - The **grey gas** model assumes that each layer absorbs and emits a fraction $\epsilon$ of its blackbody value, independent of wavelength. 
- With **incomplete absorption** ($\epsilon < 1$), there are contributions to the OLR from every level and the surface (there is no single **level of emission**) - Adding more absorbers means that **contributions to the OLR** from **upper levels** go **up**, while contributions from the surface go **down**. - This upward shift in the weighting of different levels is what we mean when we say the **level of emission goes up**. - The **radiative forcing** caused by an increase in absorbers **depends on the lapse rate**. - For an **isothermal atmosphere** the radiative forcing is zero and there is **no greenhouse effect** - The radiative forcing is positive for our atmosphere **because tropospheric temperatures tends to decrease with height**. - Pure **radiative equilibrium** produces a **warm surface** and **cold lower troposphere**. - This is unrealistic, and suggests that crucial heat transfer mechanisms are missing from our model. ### And on the Python side... Did we need `sympy` to work all this out? No, of course not. We could have solved the 3x3 matrix problems by hand. But computer algebra can be very useful and save you a lot of time and error, so it's good to invest some effort into learning how to use it. Hopefully these notes provide a useful starting point. ### A follow-up assignment You are now ready to tackle [Assignment 5](../Assignments/Assignment05 -- Radiative forcing in a grey radiation atmosphere.ipynb), where you are asked to extend this grey-gas analysis to many layers. For more than a few layers, the analytical approach we used here is no longer very useful. You will code up a numerical solution to calculate OLR given temperatures and absorptivity, and look at how the lapse rate determines radiative forcing for a given increase in absorptivity. 
<div class="alert alert-success"> [Back to ATM 623 notebook home](../index.ipynb) </div> ____________ ## Version information ____________ ``` %load_ext version_information %version_information sympy ``` ____________ ## Credits The author of this notebook is [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany. It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php) Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation. ____________
github_jupyter
Using pickle to predict unknown data. ``` #Main program #clean the memory #in ipython %reset -f #in python import gc gc.collect() # data analysis and wrangling import pandas as pd import numpy as np import random as rnd from scipy import stats # visualization import seaborn as sns import matplotlib.pyplot as plt #show plots in the Jupyter Notebook %matplotlib inline #configure visualization defaults sns.set(style='white', context='notebook', palette='deep') sns.set_style('white') # machine learning from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression, Ridge from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier, GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier from yellowbrick.classifier import ClassPredictionError from sklearn.naive_bayes import GaussianNB from yellowbrick.classifier import ClassificationReport pd.set_option('display.max_columns', 200) pd.set_option('display.max_rows', 200) #these piece of codes are learned from the Data Sciences Certficate course from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score from sklearn.metrics import precision_score from sklearn.metrics import mean_squared_error as mse from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC, NuSVC, SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import OneHotEncoder, LabelEncoder from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier from sklearn.ensemble import 
BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier import pickle # Imports - you'll need some of these later, but it's traditional to put them all at the beginning. # Downloading from the data server only need once. import os import csv import json #from collections import Counter from operator import itemgetter from requests import get def download(download_url, output_file): """ Downloads a URL and writes it to the specified path. The "path" is like the mailing address for the file - it tells the function where on your computer to send it! Also note the use of "with" to automatically close files - this is a good standard practice to follow. """ with open(output_file,'wb') as f: response = get(download_url) f.write(response.content) ## Write the Python to execute the function and download the file here: url = "http://ftp.ncbi.nlm.nih.gov/geo/series/FSE/113nnn/GSE113486/matrix/GSE112264_series_matrix.txt.gz" path = "C:/BigData/DSCert/input/GSE112264_series_matrix.txt.gz" #Acquire data #The Python Pandas packages helps us work with our datasets. We start by acquiring the "master" data generated by computer #we will split the data into training and testing datasets later. 
# #downloaded master file for this project: bladder_cancer_file = "C:/Users/Liu_PC/Documents/Georgetown/GSE113486_series_matrix.txt" #df1 = pd.read_csv(bladder_cancer_file, delimiter="\t",skiprows = 0, header = None) #disease_status = df1.iloc[[46],:] #df3 =pd.concat([df.iloc[[25],:], df.iloc[[2],:], df.drop([2, 25], axis=0)], axis=0) #print(disease_status) #df = pd.read_csv(bladder_cancer_file, delimiter="\t", skiprows = 73) #df.shape ro1 = list(range(0,50)) ro2 = list(range(51,73)) d = ro1 + ro2 print(d) df = pd.read_csv(bladder_cancer_file, delimiter="\t", skiprows = d, skipfooter =1, header = None, engine ='python') #df = pd.read_csv(bladder_cancer_file, delimiter="\t", skiprows =50, skipfooter =1, header = None, engine ='python') df.head(20) #df3 = df3.drop([5,6,7,8,9,10,11,12,13,14,15,16,17,18,20,21,22,23,24], axis=0) df.tail(5) df1 = df.T df1.loc[0, 0] = 'Cancer_type' df1.loc[0, 1] = 'Sample_ID' del df #now the dataframe is indexable new_header = df1.iloc[0] #grab the first row for the header df1 = df1[1:] #take the data less the header row df1.columns = new_header df1.to_csv("Bladder_CancerTestingSet.CleanedData.csv", sep="\t", index = False) #df3 =pd.concat([df.iloc[[25],:], df.iloc[[2],:], df.drop([2, 25], axis=0)], axis=0) #print(disease_status) df1.head(10) df = pd.read_csv("Bladder_CancerTestingSet.CleanedData.csv", delimiter="\t") #df = df.T df.head(10) df['Cancer_type'] = df['Cancer_type'].str.replace('disease status: ', '') df = df.set_index("Sample_ID") df['Cancer_type'] = df['Cancer_type'].map(lambda x: 1 if x == "Bladder Cancer" else (0 if x == "Non-cancer control" else 2 )).astype(int) #print(disease_status) #df['Cancer_type'] = df['Cancer_type'].str.replace('disease status: ', '') del df1 df.head(10) plt.hist(df['Cancer_type']) plt.title('Class Distribution') plt.show() #Cancer_Features = df.iloc[:,2:-1] good_features = ['MIMAT0019953', 'MIMAT0019810', 'MIMAT0018967', 'MIMAT0025846', 'MIMAT0026636', 'MIMAT0019869', 'MIMAT0018977', 
'MIMAT0024615', 'MIMAT0019212', 'MIMAT0004794', 'MIMAT0019064', 'MIMAT0019946', 'MIMAT0022259'] Cancer_Features = df[good_features] Cancer_Labels = df['Cancer_type'] X = Cancer_Features.values y = Cancer_Labels.values #y = np.array(Cancer_Labels.values) Sample_IDs = list(Cancer_Features.index.values) print("Feature shape: ") print(X.shape) print("Target shape: ") print(y.shape) #print(y) pickle_in = open("pickle_SVC.pkl", "rb") SVC_model = pickle.load(pickle_in) prediction_score = SVC_model.score(X, y) print(prediction_score) pickle_in = open("pickle_RFC.pkl", "rb") RFC_model = pickle.load(pickle_in) prediction_score = RFC_model.score(X, y) print(prediction_score) pickle_in = open("pickle_ETC.pkl", "rb") ETC_model = pickle.load(pickle_in) prediction_score = ETC_model.score(X, y) print(prediction_score) pickle_in = open("pickle_SGDC.pkl", "rb") SGDC_model = pickle.load(pickle_in) prediction_score = SGDC_model.score(X, y) print(prediction_score) pickle_in = open("pickle_LSVC.pkl", "rb") LSVC_model = pickle.load(pickle_in) prediction_score = LSVC_model.score(X, y) print(prediction_score) ```
github_jupyter
# <b>Object Detection with AutoML Vision</b> <br> ## <b>Learning Objectives</b> ## 1. Learn how to create and import an image dataset to AutoML Vision 1. Learn how to train an AutoML object detection model 1. Learn how to evaluate a model trained with AutoML 1. Learn how to deploy a model trained with AutoML 1. Learn how to predict on new test data with AutoML In this notebook we will use AutoML Vision Object Detection to train a machine learning model capable of detecting multiple objects in a given image and provides information about the objects and their location within the image. We will start by creating a dataset for AutoML Vision and then import a publicly available set of images into it. After that we will train, evaluate and deploy the AutoML model trained for this dataset. Ultimately we show how to send prediction requests to our model through the deployed API. ## <b>AutoML Vision Setup</b> ## Before we begin make sure you have [created a project on the GCP Console](https://cloud.google.com/vision/automl/object-detection/docs/before-you-begin) and enabled the AutoML and Cloud Storage APIs ### <b> Install AutoML and Cloud Storage package </b> ### <b>Caution: Run the following command and restart the kernel afterwards.</b> ``` pip freeze | grep google-cloud-automl==1.0.1 || pip install google-cloud-automl==1.0.1 pip freeze | grep google-cloud-storage==1.27.0 || pip install google-cloud-storage==1.27.0 import os from google.cloud import automl import tensorflow as tf ``` <br> ### <b>Set the correct environment variables </b> ### The following variables should be updated according to your own environment: ``` PROJECT_ID = "YOUR_PROJECT_ID" # Replace with your PROJECT ID SERVICE_ACCOUNT = "YOUR_SERVICE_ACCOUNT_NAME" # Replace with a name of your choice ZONE = "us-central1"# Make sure the zone is set to "us-central1" ``` <br> The following variables are computed from the one you set above, and should not be modified: ``` PWD = os.path.abspath(os.path.curdir) 
SERVICE_KEY_PATH = os.path.join(PWD, "{0}.json".format(SERVICE_ACCOUNT)) SERVICE_ACCOUNT_EMAIL="{0}@{1}.iam.gserviceaccount.com".format(SERVICE_ACCOUNT, PROJECT_ID) print(SERVICE_ACCOUNT_EMAIL) print(PROJECT_ID) # Exporting the variables into the environment to make them available to all the subsequent cells os.environ["PROJECT_ID"] = PROJECT_ID os.environ["SERVICE_ACCOUNT"] = SERVICE_ACCOUNT os.environ["SERVICE_KEY_PATH"] = SERVICE_KEY_PATH os.environ["SERVICE_ACCOUNT_EMAIL"] = SERVICE_ACCOUNT_EMAIL os.environ["ZONE"] = ZONE ``` <br> ### <b>Switching the right project and zone</b> ### ``` %%bash gcloud config set project $PROJECT_ID gcloud config set compute/region $ZONE ``` <br> ### <b>Create a service account and generate service key</b> ### Before we can run our program we need to get it authenticated. For that, we first need to generate a service account. A service account is a special type of Google account intended for non-human users (i.e., services) that need to authenticate and be authorized to access data through Google APIs (in our case the AutoML and Cloud Storage API). After the service account has been created it needs to be associated with a service account key, which is a json file holding everything that the client needs to authenticate with the service endpoint. 
```
%%bash
gcloud iam service-accounts list | grep $SERVICE_ACCOUNT || gcloud iam service-accounts create $SERVICE_ACCOUNT
```

```
%%bash
test -f $SERVICE_KEY_PATH || gcloud iam service-accounts keys create $SERVICE_KEY_PATH \
    --iam-account $SERVICE_ACCOUNT_EMAIL
echo "Service key: $(ls $SERVICE_KEY_PATH)"
```

<br>

### <b>Make the key available to google clients for authentication</b> ###

AutoML API will check this environment variable to see where the key is located and use it to authenticate

```
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = SERVICE_KEY_PATH
```

<br>

### <b>Grant service account required role permissions</b> ###

After we have created our service account and associated it with the service key we need to assign some permissions through roles. For this example we only need to grant our service account the automl and storage admin roles so it has permission to complete specific actions on the resources of your project.

```
%%bash
# Fix: `gcloud projects add-iam-policy-binding` takes a single --role per
# invocation; passing --role twice in one command does not grant both roles
# (only the last one is bound), so roles/automl.admin was never granted.
# Bind each role with its own invocation instead.
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
    --role "roles/automl.admin"
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
    --role "roles/storage.admin"
```

<br>

## <b>Step 1: Preparing and formatting training data</b> ##

The first step in creating a custom model with the AutoML Vision is to prepare the training data. In this case the training dataset is composed of images along with information identifying the location (through bounding boxes coordinates) and type of objects (through labels) in the images.

Here are some constraints and general rules for preparing an AutoML object detection dataset:

* The following image formats are supported: JPEG, PNG, GIF, BMP, or ICO. Maximum file size is 30MB per image.
* AutoML Vision models can not generally predict labels that humans can't assign. So, if a human can't be trained to assign labels by looking at the image for 1-2 seconds, the model likely can't be trained to do it either.
* It is recommended to have about 1000 training images per label (i.e.
object type you want to detect in the images). For each label you must have at least 10 images, each with at least one annotation (bounding box and the label). In general, the more images per label you have the better your model will perform. <br> ### <b>Training vs. evaluation datasets</b> ### When training machine learning models you typically divide the dataset usually into three separate datasets: 1. a training dataset 1. a validation dataset 1. a test dataset A training dataset is used to build a model. The model being trained tries multiple parameters while searching for patterns in the training data. During the process of pattern identification, AutoML Vision Object Detection uses the validation dataset to test the parameters of the model. AutoML Vision Object Detection chooses the best-performing algorithms and patterns from all options identified during the training stage. After the best performing algorithms and patterns have been identified, they are tested for error rate, quality, and accuracy using the test dataset. Both a validation and a test dataset are used in order to avoid bias in the model. During the validation stage, optimal model parameters are used. Using these optimal model parameters can result in biased metrics. Using the test dataset to assess the quality of the model after the validation stage provides the training process with an unbiased assessment of the quality of the model. By default, AutoML Vision Object Detection splits your dataset randomly into 3 separate sets (you don't need to do it yourself!): * 80% of images are used for training. * 10% of images are used for hyper-parameter tuning and/or to decide when to stop training. * 10% of images are used for evaluating the model. These images are not used in training. 
<br> ### <b>Create a CSV file with image URIs and labels</b> ### Once your image files have been uploaded to a Cloud Storage bucket (`gs://bucket-name-vcm`), you must create a CSV file that lists all of the URI of the uploaded images, along with bounding box information and the object labels. The dataset will contain one row per bounding box in the image, so an image that has two bounding boxes will have two corresponding rows in the CSV file sharing the same image URI. The CSV file can have any filename, must be in the same bucket as your image files, must be UTF-8 encoded, and must end with a `.csv` extension. In the example below, rows 1 and 2 reference the same image that has 2 annotations `(car,0.1,0.1,,,0.3,0.3,,)` and `(bike,.7,.6,,,.8,.9,,)`. The first element of the annotation is the object label in the bounding box, while the rest are the coordinates of the bounding box within the image (see below for details). Row 3 refers to an image that has only 1 annotation `(car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3)`, while row 4 references an image with no annotations. The first column corresponds to the data split, the second column to the image URI, and the last columns hold the annotations. **Example:** ```bash TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 TEST,gs://folder/im3.png,,,,,,,,, ``` Each row above has these columns: ` 1. <b>Which dataset is the content in the row being assigned to.</b> - `TRAIN`, `VALIDATE`, `TEST` or `UNASSIGNED` 1. <b>What content is being annotated.</b> - It contains the GCS URI for the image 1. <b>A label that identifies how the object is categorized. 1. 
<b>A bounding box for an object in the image.</b> The **bounding box** for an object can be specified in two ways: * with only 2 vertices (consisting of a set of x and y coordinates) if they are diagonally opposite points of the rectangle ``` (x_relative_min,y_relative_min,,,x_relative_max,y_relative_max,,) ``` * with all 4 vertices ``` (x_relative_min,y_relative_min,x_relative_max,y_relative_min,x_relative_max,y_relative_max,x_relative_min,y_relative_max) ``` Each vertex is specified by x, y coordinate values. These coordinates must be a float in the 0 to 1 range, where 0 represents the minimum x or y value, and 1 represents the greatest x or y value. For example, `(0,0)` represents the top left corner, and `(1,1)` represents the bottom right corner; a bounding box for the entire image is expressed as `(0,0,,,1,1,,)`, or `(0,0,1,0,1,1,0,1)`. AutoML API does not require a specific vertex ordering. Additionally, if 4 specified vertices don't form a rectangle parallel to image edges, AutoML API calculates and uses vertices that do form such a rectangle. ### Generating a CSV file for unlabeled images stored in Cloud Storage ### If you already have unlabeled images uploaded to Cloud Storage and would like to generate a CSV pointing to them, run this code in Cloud Shell: ``` for f in $(gsutil ls gs://YOUR_BUCKET/YOUR_IMAGES_FOLDER/); do echo UNASSIGNED,$f; done >> labels.csv; ``` Then copy the generated CSV file into a Google Storage Bucket: ```gsutil cp labels.csv gs://YOUR_BUCKET/labels.csv``` Then after uploading the images to AutoML Object Detection, you can use Cloud Vision API's [Object Localizer](https://cloud.google.com/vision/docs/object-localizer) feature to help build your dataset by getting more generalized labels and bounding boxes for objects in an image. <br> ## <b>Step 2: Create a dataset</b> ## Next step is to create and name an empty dataset that will eventually hold the training data for the model. 
```
DATASET_NAME = "salad_dataset"  # Replace with desired dataset name

client = automl.AutoMlClient()

# A resource that represents Google Cloud Platform location.
project_location = client.location_path(PROJECT_ID, ZONE)

# Object-detection datasets require this (empty) metadata message.
metadata = automl.types.ImageObjectDetectionDatasetMetadata()
dataset = automl.types.Dataset(
    # Fix: the original passed the undefined name `display_name` here,
    # which raises NameError; the constant defined above is DATASET_NAME.
    display_name=DATASET_NAME,
    image_object_detection_dataset_metadata=metadata,
)

# Create a dataset with the dataset metadata in the region.
response = client.create_dataset(project_location, dataset)
created_dataset = response.result()  # blocks until the dataset exists

# Display the dataset information
print("Dataset name: {}".format(created_dataset.name))
print("Dataset id: {}".format(created_dataset.name.split("/")[-1]))
```

<br>

## <b>Step 3: Import images into a dataset</b> ##

After you have created a dataset, prepared and formatted your training data, it's time to import that training data into our created dataset.

In this notebook we will use a publicly available "Salads" training dataset that is located at `gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv`. This dataset contains images of salads with bounding boxes and labels around their ingredients (e.g., tomato, seafood, etc.). So the model we will train will be able to take as input the image of a salad and determine the ingredients composing the salad as well as the location of the ingredients on the salad image.

Please note the import might take a couple of minutes to finish depending on the file size.

```
# The dataset id is the last component of the dataset resource name
# (the redundant `format(...)` wrapper around this string was dropped).
DATASET_ID = created_dataset.name.split("/")[-1]
DATASET_URI = "gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv"

# Get the full path of the dataset.
dataset_full_id = client.dataset_path(
    PROJECT_ID, ZONE, DATASET_ID
)

# Get the multiple Google Cloud Storage URIs (the CSV source may list
# several comma-separated files).
# Fix: the original split the undefined name `path`; the import source is
# the DATASET_URI constant defined in the previous cell.
input_uris = DATASET_URI.split(",")
gcs_source = automl.types.GcsSource(input_uris=input_uris)
input_config = automl.types.InputConfig(gcs_source=gcs_source)

# Import data from the input URI
response = client.import_data(dataset_full_id, input_config)

print("Processing import...")
print("Data imported. {}".format(response.result()))  # blocks until done
```

<br>

## <b>Step 4: Train your AutoML Vision model</b> ##

Once you are happy with your created dataset you can proceed with training the model.

<i>Please note</i> - training time takes approximately <b>1-3h</b>

```
MODEL_NAME = "salads"  # Replace with desired model name

# A resource that represents Google Cloud Platform location.
project_location = client.location_path(PROJECT_ID, ZONE)

# Leave model unset to use the default base model provided by Google
# train_budget_milli_node_hours: The actual train_cost will be equal or
# less than this value.
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodelmetadata
training_metadata = automl.types.ImageObjectDetectionModelMetadata(
    train_budget_milli_node_hours=24000
)
model = automl.types.Model(
    # Fix: the original referenced the undefined names `display_name`,
    # `dataset_id` and `metadata`; use the values defined above.
    display_name=MODEL_NAME,
    dataset_id=DATASET_ID,
    image_object_detection_model_metadata=training_metadata,
)

# Create a model with the model metadata in the region.
training_results = client.create_model(project_location, model)

# Fix: the original printed `response.operation.name`, but `response` is the
# import-data future from the earlier cell; the training long-running
# operation is `training_results`.
print("Training operation name: {}".format(training_results.operation.name))
print("Training started...")
```

<br>

### <b>Information about the trained model</b> ###

```
# Fix: at this point `model` is the local model *spec* sent to the API,
# which has no server-assigned resource name, so `model.name` was empty.
# Wait for the training operation to finish and read the id from its
# result (NOTE: this call blocks until training completes, ~1-3h).
MODEL_ID = training_results.result().name.split("/")[-1]

# Get the full path of the model.
model_full_id = client.model_path(PROJECT_ID, ZONE, MODEL_ID)
model = client.get_model(model_full_id)

# Retrieve deployment state.
if model.deployment_state == automl.enums.Model.DeploymentState.DEPLOYED: deployment_state = "deployed" else: deployment_state = "undeployed" # Display the model information. print("Model name: {}".format(model.name)) print("Model id: {}".format(model.name.split("/")[-1])) print("Model display name: {}".format(model.display_name)) print("Model create time:") print("\tseconds: {}".format(model.create_time.seconds)) print("\tnanos: {}".format(model.create_time.nanos)) print("Model deployment state: {}".format(deployment_state)) ``` <br> ## <b>Step 5: Evaluate the model</b> ## After training a model, Cloud AutoML Vision Object Detection uses images from the TEST image set to evaluate the quality and accuracy of the new model. It provides an aggregate set of evaluation metrics indicating how well the model performs overall, as well as evaluation metrics for each category label, indicating how well the model performs for that label. By running the cell below you can list evaluation metrics for that model. ``` print("List of model evaluations:") for evaluation in client.list_model_evaluations(model_full_id, ""): print("Model evaluation name: {}".format(evaluation.name)) print( "Model annotation spec id: {}".format( evaluation.annotation_spec_id ) ) print("Create Time:") print("\tseconds: {}".format(evaluation.create_time.seconds)) print("\tnanos: {}".format(evaluation.create_time.nanos / 1e9)) print( "Evaluation example count: {}".format( evaluation.evaluated_example_count ) ) print( "Object detection model evaluation metrics: {}\n\n".format( evaluation.image_object_detection_evaluation_metrics ) ) ``` <br> ## <b>Step 6: Deploy the model</b> ## Once we are happy with the performance of our trained model, we can deploy it so that it will be available for predictions through an API. ``` response = client.deploy_model(model_full_id) print("Model deployment finished. 
{}".format(response.result())) ``` <br> ## <b>Step 7: Send prediction request</b> ## In this example we will invoke an individual prediction from an image that is stored in our project's Cloud storage bucket. Object detection models output many bounding boxes for an input image. For the output we are expecting that each box comes with: 1. a label and 1. a score of confidence. ``` TEST_IMAGE_PATH = "gs://your-bucket-name-vcm/your-folder-name/your-image.jpg" # Replace with a Cloud storage bucket uploaded image of your choice prediction_client = automl.PredictionServiceClient() # Read the file. with tf.io.gfile.GFile(TEST_IMAGE_PATH, "rb") as content_file: content = content_file.read() image = automl.types.Image(image_bytes=content) payload = automl.types.ExamplePayload(image=image) # params is additional domain-specific parameters. # score_threshold is used to filter the result # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest params = {"score_threshold": "0.8"} response = prediction_client.predict(model_full_id, payload, params) ``` Now that we have the response object from the deployed model, we can inspect its predictions (i.e., the bounding boxes and objects that the model has detected from the images we sent to it in the cell above): ``` print("Prediction results:") for result in response.payload: print("Predicted class name: {}".format(result.display_name)) print( "Predicted class score: {}".format( result.image_object_detection.score ) ) bounding_box = result.image_object_detection.bounding_box print("Normalized Vertices:") for vertex in bounding_box.normalized_vertices: print("\tX: {}, Y: {}".format(vertex.x, vertex.y)) ```
github_jupyter
``` # Copyright 2021 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================ ``` <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;"> # Two-Stage Recommender Systems This notebook is created using the latest stable [merlin-tensorflow-training](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/merlin/containers/merlin-tensorflow-training/tags) container. In large scale recommender systems pipelines, the size of the item catalog (number of unique items) might be in the order of millions. At such scale, a typical setup is having two-stage pipeline, where a faster candidate retrieval model quickly extracts thousands of relevant items and a then a more powerful ranking model (i.e. with more features and more powerful architecture) ranks the top-k items that are going to be displayed to the user. For ML-based candidate retrieval model, as it needs to quickly score millions of items for a given user, a popular choices are models that can produce recommendation scores by just computing the dot product the user embeddings and item embeddings. Popular choices of such models are **Matrix Factorization**, which learns low-rank user and item embeddings, and the **Two-Tower architecture**, which is a neural network with two MLP towers where both user and item features are fed to generate user and item embeddings in the output. 
### Dataset In this notebook, we are building a Two-Tower model for Item Retrieval task using synthetic datasets that are mimicking the real [Ali-CCP: Alibaba Click and Conversion Prediction](https://tianchi.aliyun.com/dataset/dataDetail?dataId=408#1) dataset. ### Learning objectives - Preparing the data with NVTabular - Training and evaluating Two-Tower model with Merlin Models - Exporting the model for deployment ### Importing Libraries ``` import os import nvtabular as nvt from nvtabular.ops import * from merlin.models.utils.example_utils import workflow_fit_transform from merlin.schema.tags import Tags import merlin.models.tf as mm from merlin.io.dataset import Dataset import tensorflow as tf # disable INFO and DEBUG logging everywhere import logging logging.disable(logging.WARNING) ``` ### Feature Engineering with NVTabular Let's generate synthetic train and validation dataset objects. ``` from merlin.datasets.synthetic import generate_data DATA_FOLDER = os.environ.get("DATA_FOLDER", "/workspace/data/") NUM_ROWS = os.environ.get("NUM_ROWS", 1000000) SYNTHETIC_DATA = eval(os.environ.get("SYNTHETIC_DATA", "True")) if SYNTHETIC_DATA: train, valid = generate_data("aliccp-raw", int(NUM_ROWS), set_sizes=(0.7, 0.3)) else: train = nvt.Dataset(DATA_FOLDER + '/train/*.parquet') valid = nvt.Dataset(DATA_FOLDER + '/valid/*.parquet') # define output path for the processed parquet files output_path = os.path.join(DATA_FOLDER, "processed") ``` We keep only positive interactions where clicks==1 in the dataset with `Filter()` op. 
``` user_id = ["user_id"] >> Categorify() >> TagAsUserID() item_id = ["item_id"] >> Categorify() >> TagAsItemID() item_features = ["item_category", "item_shop", "item_brand"] >> Categorify() >> TagAsItemFeatures() user_features = ['user_shops', 'user_profile', 'user_group', 'user_gender', 'user_age', 'user_consumption_2', 'user_is_occupied', 'user_geography', 'user_intentions', 'user_brands', 'user_categories'] \ >> Categorify() >> TagAsUserFeatures() inputs = user_id + item_id + item_features + user_features + ['click'] outputs = inputs >> Filter(f=lambda df: df["click"] == 1) ``` With `transform_aliccp` function, we can execute fit() and transform() on the raw dataset applying the operators defined in the NVTabular workflow pipeline above. The processed parquet files are saved to output_path. ``` from merlin.datasets.ecommerce import transform_aliccp transform_aliccp((train, valid), output_path, nvt_workflow=outputs) ``` ## Building a Two-Tower Model with Merlin Models We will use Two-Tower Model for item retrieval task. Real-world large scale recommender systems have hundreds of millions of items (products) and users. Thus, these systems often composed of two stages: candidate generation (retrieval) and ranking (scoring the retrieved items). At candidate generation step, a subset of relevant items from large item corpus is retrieved. You can read more about two stage Recommender Systems here. In this example, we're going to focus on the retrieval stage. A Two-Tower Model consists of item (candidate) and user (query) encoder towers. With two towers, the model can learn representations (embeddings) for queries and candidates separately. <img src="./images/TwoTower.png" width="30%"> Image Adapted from: [Off-policy Learning in Two-stage Recommender Systems](https://dl.acm.org/doi/abs/10.1145/3366423.3380130) We use the `schema` object to define our model. 
``` output_path train = Dataset(os.path.join(output_path, 'train', '*.parquet')) valid = Dataset(os.path.join(output_path, 'valid', '*.parquet')) schema = train.schema schema = schema.select_by_tag([Tags.ITEM_ID, Tags.USER_ID, Tags.ITEM, Tags.USER]) ``` We can print out the feature column names. ``` schema.column_names ``` We expect the label names to be empty. ``` label_names = schema.select_by_tag(Tags.TARGET).column_names label_names ``` ### Negative sampling Many datasets for recommender systems contain implicit feedback with logs of user interactions like clicks, add-to-cart, purchases, music listening events, rather than explicit ratings that reflects user preferences over items. To be able to learn from implicit feedback, we use the general (and naive) assumption that the interacted items are more relevant for the user than the non-interacted ones. In Merlin Models we provide some scalable negative sampling algorithms for the Item Retrieval Task. In particular, we use in this example the in-batch sampling algorithm which uses the items interacted by other users as negatives within the same mini-batch. ### Building the Model Now, let's build our Two-Tower model. In a nutshell, we aggregate all user features to feed in user tower and feed the item features to the item tower. Then we compute the positive score by multiplying the user embedding with the item embedding and sample negative items (read more about negative sampling [here](https://openreview.net/pdf?id=824xC-SgWgU) and [here](https://medium.com/mlearning-ai/overview-negative-sampling-on-recommendation-systems-230a051c6cd7)), whose item embeddings are also multiplied by the user embedding. Then we apply the loss function on top of the positive and negative scores. 
``` model = mm.TwoTowerModel( schema, query_tower=mm.MLPBlock([128, 64], no_activation_last_layer=True), loss="categorical_crossentropy", samplers=[mm.InBatchSampler()], embedding_options = mm.EmbeddingOptions(infer_embedding_sizes=True), metrics=[mm.RecallAt(10), mm.NDCGAt(10)] ) ``` Let's explain the parameters in the TwoTowerModel(): - no_activation_last_layer: when set True, no activation is used for top hidden layer. Learn more [here](https://storage.googleapis.com/pub-tools-public-publication-data/pdf/b9f4e78a8830fe5afcf2f0452862fb3c0d6584ea.pdf). - infer_embedding_sizes: when set True, automatically defines the embedding dimension from the feature cardinality in the schema **Metrics:** The following information retrieval metrics are used to compute the Top-10 accuracy of recommendation lists containing all items: - **Normalized Discounted Cumulative Gain (NDCG@10)**: NDCG accounts for rank of the relevant item in the recommendation list and is a more fine-grained metric than HR, which only verifies whether the relevant item is among the top-k items. - **Recall@10**: Also known as HitRate@n when there is only one relevant item in the recommendation list. Recall just verifies whether the relevant item is among the top-n items. We need to initialize the dataloaders. ``` model.compile(optimizer='adam', run_eagerly=False) model.fit(train, validation_data=valid, batch_size=4096, epochs=3) ``` ## Exporting Retrieval Models So far we have trained and evaluated our Retrieval model. Now, the next step is to deploy our model and generate top-K recommendations given a user (query). We can efficiently serve our model by indexing the trained item embeddings into an **Approximate Nearest Neighbors (ANN)** engine. 
Basically, for a given user query vector, which is generated by passing the user features into the user tower of the retrieval model, we do an ANN search query to find the ids of nearby item vectors, and at serve time, we score user embeddings over all indexed top-K item embeddings within the ANN engine. In doing so, we need to export - user (query) tower - item and user features - item embeddings #### Save User (query) tower We are able to save the user tower model as a TF model to disk. The user tower model is needed to generate a user embedding vector when a user feature vector <i>x</i> is fed into that model. ``` query_tower = model.retrieval_block.query_block() query_tower.save('query_tower') ``` #### Extract and save User features With the `unique_rows_by_features` utility function we can easily extract both unique user and item features tables as cuDF dataframes. Note that for the user features table, we use `USER` and `USER_ID` tags. ``` from merlin.models.utils.dataset import unique_rows_by_features user_features = unique_rows_by_features(train, Tags.USER, Tags.USER_ID).compute().reset_index(drop=True) user_features.head() user_features.shape # save to disk user_features.to_parquet('user_features.parquet') ``` #### Extract and save Item features ``` item_features = unique_rows_by_features(train, Tags.ITEM, Tags.ITEM_ID).compute().reset_index(drop=True) item_features.head() # save to disk item_features.to_parquet('item_features.parquet') ``` #### Extract and save Item embeddings ``` item_embs = model.item_embeddings(Dataset(item_features, schema=schema), batch_size=1024) item_embs_df = item_embs.compute(scheduler="synchronous") item_embs_df # select only embedding columns item_embeddings = item_embs_df.iloc[:, 4:] item_embeddings.head() # save to disk item_embeddings.to_parquet('item_embeddings.parquet') ``` That's it. 
You have learned how to train and evaluate your Two-Tower retrieval model, and then how to export the required components to be able to deploy this model to generate recommendations. To learn more about serving a model to [Triton Inference Server](https://github.com/triton-inference-server/server), please explore the examples in the [Merlin](https://github.com/NVIDIA-Merlin/Merlin) and [Merlin Systems](https://github.com/NVIDIA-Merlin/systems) repos.
github_jupyter
# Import libs ``` import sys import os sys.path.append('..') from eflow.foundation import DataPipeline,DataFrameTypes from eflow.data_analysis import FeatureAnalysis, NullAnalysis from eflow.model_analysis import ClassificationAnalysis from eflow.data_pipeline_segments import FeatureTransformer, DataEncoder from eflow.utils.modeling_utils import optimize_model_grid from eflow.utils.eflow_utils import get_type_holder_from_pipeline, remove_unconnected_pipeline_segments from eflow.utils.math_utils import get_unbalanced_threshold from eflow.utils.sys_utils import create_dir_structure from eflow.utils.eflow_utils import create_color_dict_for_features from eflow.utils.pandas_utils import data_types_table, value_counts_table, suggest_removal_features, missing_values_table, df_auto_binning from eflow.widgets import ColorLabelingWidget import pandas as pd import numpy as np import pickle from nltk.corpus import words import matplotlib.pyplot as plt import copy from IPython.display import clear_output from IPython.core.getipython import get_ipython import ipython_blocking import nltk # # Additional add ons # !pip install pandasgui # !pip install pivottablejs # clear_output() %matplotlib notebook %matplotlib inline ``` #### Download natural language processing utils ``` nltk.download('wordnet') nltk.download('words') nltk.download('punkt') ``` ## Juypter notebook generating cells ### Important Note: Replace if set to True will remove all the contents of whatever cell it is called in. But it can be undone with a simple CMD + Z. 🙂 ``` # Author: http://tinyurl.com/y6mghyzl def create_new_cell(contents, replace=False): """ Desc: Creates a new jupyter cell. """ shell = get_ipython() shell.set_next_input(contents, replace=replace) def __format_list_to_string(list_name, list_contents): """ Desc: Converts a list to a string and adds newlines for formating. 
""" output_str = f"{list_name} = [" escape_seq_count = 0 final_index = len(list_contents) - 1 req_spacing = len(output_str) for i,element in enumerate(list_contents): if i == final_index: if isinstance(element,str): output_str += f'\"{element}\"' else: output_str += f'{element}' else: if isinstance(element,str): output_str += f'\"{element}\",' else: output_str += f'{element},' if len(output_str.split("\n")[escape_seq_count]) > 78: output_str += "\n" output_str += (" " * req_spacing) escape_seq_count += 1 output_str += "]" return output_str def create_new_cell_with_removal_features(df, replace=True): """ Desc: Creates a new cell block with a list of suggested features to remove. Args: df: Pandas DataFrame object replace: Boolean to determine replacing the current cell. """ # Get suggestions for removal cell_content = __format_list_to_string("removal_features", suggest_removal_features(df)) # Add a sort of calling card of the function that created it cell_content = f"# create_new_cell_with_removal_features(df,replace={replace})\n" + cell_content create_new_cell(cell_content, replace=replace) def create_new_cell_with_null_removal_features(df, null_threshold=.25, replace=True): """ Desc: Creates a new cell block with a list of suggested features to remove based on nulls. Args: df: Pandas DataFrame object null_threshold: Any features that contain x% percent of nulls are suggested. replace: Boolean to determine replacing the current cell. 
""" mis_val = df.isnull().sum() mis_val_percent = df.isnull().sum() / len(df) cell_content = f"# create_new_cell_with_null_removal_features(df,null_threshold={null_threshold},replace={replace})\n" cell_content += __format_list_to_string("remove_null_features", mis_val_percent[mis_val_percent > null_threshold].index.to_list()) # Add a calling card of the function that created it create_new_cell(cell_content, replace=replace) def create_new_cell_with_feature_value_color_dict(df, df_features, value_limit=50, replace=True): """ Desc: Creates a new cell block with a dict of suggested feature value colors. Args: df: Pandas DataFrame object df_features: DataFrameTypes object. null_threshold: Any features that contain x% percent of nulls are suggested. value_limit: Limit the amount of feature_values until the system will ignore the feature all together for dict generation. replace: Boolean to determine replacing the current cell. """ feature_value_color_dict = create_color_dict_for_features(df, df_features, value_limit) # Add a sort of calling card of the function that created it cell_content = "" cell_content += f"# create_new_cell_with_feature_value_color_dict(df,df_features,value_limit={value_limit},replace={replace})\n" cell_content += "feature_value_color_dict=dict()" feature_count = 0 for feature_name, feature_value_color in feature_value_color_dict.items(): if feature_value_color_dict[feature_name].keys(): cell_content += f"\nfeature_value_color_dict[\"{feature_name}\"] = dict()" else: cell_content += f"\n\n# The feature '{feature_name}' has to many values! Asserting assumption that you don't want to give colors to each!" 
for feature_value, color in feature_value_color.items(): color = feature_value_color_dict[feature_name][feature_value] if feature_name in df_features.bool_features() or feature_name in df_features.categorical_features(): try: feature_value = int(float(feature_value)) except: pass if isinstance(feature_value,str): feature_value = f"\"{feature_value}\"" else: feature_value = f"{feature_value}" if color is None: cell_content += f"\nfeature_value_color_dict[\"{feature_name}\"][{feature_value}] = None" else: cell_content += f"\nfeature_value_color_dict[\"{feature_name}\"][{feature_value}] = \"{color}\"" cell_content += "\n" create_new_cell(cell_content, replace=replace) def create_new_cell_with_categorical_dict(df, df_features, value_limit=50, replace=True): """ Desc: Creates a new cell block with a dict of Args: df: Pandas DataFrame object df_features: DataFrameTypes object. value_limit: Limit the amount of feature_values until the system will ignore the feature all together for dict generation. replace: Boolean to determine replacing the current cell. 
""" cell_content = "" cell_content += f"# create_new_cell_with_categorical_dict(df,df_features,value_limit={value_limit},replace={replace})\n" cell_content += "categorical_value_dict = dict()\n" categorical_value_dict = dict() for feature_name in df_features.categorical_features(): # Find and sort feature values feature_values = df[feature_name].value_counts(sort=False).index.to_list() feature_values = [str(val) for val in feature_values] feature_values.sort() # Create feature cat dict cat_found = False categorical_value_dict[feature_name] = dict() for val in feature_values: try: categorical_value_dict[feature_name][int(val)] = "" cat_found = True except ValueError: pass # Delete feature name if no categories are found if not cat_found: del categorical_value_dict[feature_name] for feature_name,cat_val_dict in categorical_value_dict.items(): if len(cat_val_dict.keys()) < value_limit: cell_content += f"categorical_value_dict[\"{feature_name}\"]=dict()\n" for cat,val in cat_val_dict.items(): if isinstance(val,str): cell_content += f"categorical_value_dict[\"{feature_name}\"][{cat}] = \"{val}\"\n" else: cell_content += f"categorical_value_dict[\"{feature_name}\"][{cat}] = {val}\n" else: cell_content += f"\n\n# The feature '{feature_name}' has to many values! Asserting assumption that you don't want to give encode to each!" create_new_cell(cell_content, replace=replace) def create_new_cell_with_value_representation(df, df_features, value_limit=50, replace=True): """ Desc: Creates a new cell block with a dict of suggested feature value colors. Args: df: Pandas DataFrame object df_features: DataFrameTypes object. value_limit: Limit the amount of feature_values until the system will ignore the feature all together for dict generation. replace: Boolean to determine replacing the current cell. 
""" feature_value_representation = dict() for feature_name in df_features.string_features(): feature_value_representation[feature_name] = dict() for val in df[feature_name].dropna().value_counts(sort=False).index.to_list(): print(val) if isinstance(val,str): if len(val) == 0: continue feature_value_representation[feature_name][val] = "" if len(feature_value_representation[feature_name].keys()) >= 50: break if not len(feature_value_representation[feature_name].keys()): del feature_value_representation[feature_name] cell_content = "" cell_content += f"# create_new_cell_with_value_representation(df,df_features,value_limit={value_limit},replace={replace})\n" cell_content += "feature_value_representation = dict()\n" for feature_name,val_repr_dict in feature_value_representation.items(): if len(val_repr_dict.keys()) < value_limit: cell_content += f"feature_value_representation[\"{feature_name}\"] = dict()\n" for val,reprs in val_repr_dict.items(): if isinstance(val,str): cell_content += f"feature_value_representation[\"{feature_name}\"][\"{val}\"] = " else: cell_content += f"feature_value_representation[\"{feature_name}\"][{val}] = " if isinstance(reprs,str): cell_content += f"\"{reprs}\"\n" else: cell_content += f"{reprs}\n" else: cell_content += f"\n\n# The feature '{feature_name}' has to many values! Asserting assumption that you don't want to give representation to to each!" cell_content += "\n" create_new_cell(cell_content, replace=replace) def create_new_cell_with_binned_features(df, df_features, bins=5, replace=True): """ Desc: Creates a new cell block with a list of suggested bins and labels for each feature. Args: df:pd.Dataframe Pandas DataFrame object. df_features: DataFrameTypes object. bins:int The amount of bins to give to apply to each feature replace:bool Boolean to determine replacing the current cell. 
""" # Add a sort of calling card of the function that created it cell_content = f"# create_new_cell_with_binned_features(df,df_features,bins={bins},replace={replace})\n" for feature_name in df_features.continuous_numerical_features(): bins,labels = df_auto_binning(df, df_features, feature_name, bins=5) cell_content += f"feature_name = \"{feature_name}\"\n" cell_content += __format_list_to_string("bins", bins) cell_content += "\n" cell_content += __format_list_to_string("labels", labels) cell_content += f"\ndf_features.set_feature_binning(feature_name,\n" cell_content += " bins,\n" cell_content += " labels)\n" cell_content += "\n\n" create_new_cell(cell_content, replace=replace) ``` ## Declare Project Variables ### Interaction required ``` dataset_path = "" dataset_name = "" # ----- inspect_data_project_dir = f"{dataset_name}/Before Cleaning" # ----- notebook_mode = True # ----- display_value_counts = False ``` # Import dataset ``` df = pd.read_csv(dataset_path) shape_df = pd.DataFrame.from_dict({'Rows': [df.shape[0]], 'Columns': [df.shape[1]]}) display(shape_df) display(df.head(30)) data_types_table(df) ``` ## Remove/Declare any unwanted features ### Interaction required Note: When starting a new project uncomment the function to get suggestions and then run the cell again. ``` create_new_cell_with_removal_features(df,replace=True) df.drop(columns=removal_features, inplace=True) data_types_table(df) ``` ## Gui tools for quick analysis dataframes Great interface; pauses the program; comment on/off at free will. You will need to reset kernel after use more than likely. ``` # from pandasgui import show as qt_display # qt_display(df) # %matplotlib inline # pivot_ui(df, # outfile_path='Piviot_Table_JS.html') ``` # Any basic manipulation of features #### What I mean by this is say you want to represent a feature slightly different than it is currently displaying. 
Note: that whatever maniuplation you do here you should bring to each notebook's section of "Any basic manipulation of features" ## Skim through Value Counts ``` if display_value_counts: for feature_name in df.columns: print(f'******* Feature: {feature_name} *******') print(f'Type: {df[feature_name].dtype}') display(value_counts_table(df, feature_name)) print("-------" * 4 + "\n\n") ``` # Mark target feature; set to None if not needed ### Interaction required ### Supervised learning problems (Can be set to None) ``` target_feature = "" try: if target_feature: df[target_feature] except KeyError: raise KeyError(f"The target feature \'{target_feature}\' was not found in the dataframe!" + " Please select a valid feature from the dataframe") if target_feature: print(f"Target feature '{target_feature}'") print("----" * 10) target_amount = len(df[target_feature].dropna().value_counts().index) value_count_df = value_counts_table(df, target_feature) if target_amount < 1: display(value_count_df) elif target_amount > 25: display(value_count_df) print("Value count is above 25 asserting that this is probably a continous data stream!") else: # Change arg 'max_binary_threshold' to see changes in threshold max_unbalanced_class_threshold, min_unbalanced_class_threshold = get_unbalanced_threshold(target_amount) print(f"max_unbalanced_class_threshold = {max_unbalanced_class_threshold * 100:.3f}%") print(f"min_unbalanced_class_threshold = {min_unbalanced_class_threshold * 100:.3f}%") display(value_count_df) index = 0 for percentage in value_count_df["Percantage"]: percentage = float(percentage[:-1])/100 if percentage >= max_unbalanced_class_threshold or percentage <= min_unbalanced_class_threshold: print(f"The value '{value_count_df.index.values[index]}' is causing the target feature to be unbalanced.\n" + "This could cause a model to not properly generalize itself.") print("---" * 10 + "\n") index += 1 ``` # Load/Init DataFrameTypes object. 
This object is used to store an abstracted form of what a feature 'should be' rather than what the pandas dataframe object says it is. In this case we will be specifying all features correct types. Comment out/remove depending on how you want your design flow to be. ``` df_features = DataFrameTypes(df, ignore_nulls=True, fix_numeric_features=True, fix_string_features=True, target_feature=target_feature, notebook_mode=notebook_mode) ``` ## Make any changes to 'df_features' that automated type assertions messed up. Ex: Sometimes df_features will think a feature is a category when it isn't. Move to proper types. ``` print("df_features types:") df_features.display_features(display_dataframes=True, notebook_mode=notebook_mode) df_features.set_feature_to_bool(feature_name=[]) df_features.set_feature_to_integer(feature_name=[]) df_features.set_feature_to_float(feature_name=[]) df_features.set_feature_to_string(feature_name=[]) df_features.set_feature_to_datetime(feature_name=[]) df_features.set_feature_to_categorical(feature_name=[]) print("df_features types:") df_features.display_features(display_dataframes=True, notebook_mode=notebook_mode) print("Dataframe's types:") data_types_table(df) ``` # Colors and palletes for features ### Remove any unwanted values found or any unwanted features to be color coded. 
``` create_new_cell_with_feature_value_color_dict(df,df_features,value_limit=50,replace=True) cleaning_widget = ColorLabelingWidget() cleaning_widget.run_widget(feature_value_color_dict) ``` ### Reinitialize feature color dictionary ``` feature_value_color_dict = cleaning_widget.get_feature_value_color_dict() feature_value_color_dict df_features.set_feature_colors(feature_value_color_dict) ``` # Label categories if possible ### Interaction required ### It's considered good practice to label up your categories with proper labels for graphing/analysis ``` create_new_cell_with_categorical_dict(df,df_features,value_limit=50,replace=True) df_features.set_encoder_for_features(df, categorical_value_dict) ``` # Value Reprsentation It's good practice to describe our data as best as possible. Instead of values being abbreviation forms of their actual value. Ex: M = Male ``` create_new_cell_with_value_representation(df,df_features,value_limit=50,replace=True) df_features.set_feature_value_representation(feature_value_representation) ``` # Bin any numerical values ``` create_new_cell_with_binned_features(df,df_features,bins=5,replace=True) ``` # Test encoding and value reprsentation ``` data_encoder = DataEncoder(create_file=False) df data_encoder.apply_value_representation(df, df_features) df data_encoder.revert_value_representation(df, df_features) df data_encoder.make_values_bool(df, df_features) df data_encoder.revert_value_representation(df, df_features) df data_encoder.encode_data(df, df_features, apply_value_representation=True) df data_encoder.decode_data(df, df_features, apply_value_representation=True) df qualitative_features = df_features.string_features() | df_features.categorical_features() data_encoder.make_dummies(df, df_features, qualitative_features=qualitative_features) df data_encoder.revert_dummies(df, df_features, qualitative_features=qualitative_features) df ``` # Decode and apply value reprsentation for feature analysis ``` 
data_encoder.apply_value_representation(df, df_features) df data_encoder.decode_data(df, df_features) df df_features.display_features() ``` ## Create a json file of df_features ``` created_dir = create_dir_structure(os.getcwd(), f"/eflow Data/{dataset_name}") df_features.create_json_file_representation(created_dir, "df_features.json") # df_features = DataFrameTypes() # df_features.init_on_json_file(os.getcwd() + f"/eflow Data/{dataset_name}/df_features.json") # df_features.display_features() ``` # Feature Analysis of feature data ``` feature_analysis = FeatureAnalysis(df_features, project_sub_dir=inspect_data_project_dir) feature_analysis.perform_analysis(df, dataset_name="Full " + dataset_name, target_features=[df_features.target_feature()], suppress_runtime_errors=True, display_print=False, display_visuals=False, dataframe_snapshot=True, statistical_analysis_on_aggregates=True) ``` # Get P-value summary on aggerations info for stastical methods ``` infile = open(feature_analysis.folder_path + "Full " + dataset_name + "/_Extras/Statistics/Stat methods of features dataframes.pkl",'rb') stat_methods_dict = pickle.load(infile) infile.close() for stats_method in stat_methods_dict.keys(): print(stats_method) display(stat_methods_dict[stats_method].round(6)) all_feature_relationship = set() for feature_relationship in stat_methods_dict[stats_method][:10].index.to_list(): for feature in feature_relationship.split(" compared to "): all_feature_relationship.add(feature) print(all_feature_relationship) del stat_methods_dict ``` # Get entropy table ``` infile = open(feature_analysis.folder_path + "Full " + dataset_name + "/_Extras/Statistics/Entropy Table.pkl",'rb') entropy_table = pickle.load(infile) infile.close() entropy_table del feature_analysis ``` # Null Analysis of data ``` null_analysis = NullAnalysis(df_features, project_sub_dir=inspect_data_project_dir, notebook_mode=notebook_mode) null_analysis.perform_analysis(df, dataset_name="Full " + dataset_name, 
null_features_only=True, display_visuals=True, display_print=False, dataframe_snapshot=True) missing_table = missing_values_table(df) display(missing_table) nan_features = missing_table[missing_table["% of Total Values"] > 15].index.to_list() nan_features null_analysis.feature_analysis_of_null_data(df, "Full " + dataset_name, target_features=[df_features.target_feature()], display_visuals=False, display_print=False, save_file=True, suppress_runtime_errors=True, aggregate_target_feature=True, extra_tables=True, nan_features=nan_features) del null_analysis ``` # Analyze data after binning ``` continuous_numerical_features = df_features.continuous_numerical_features() data_encoder.apply_binning(df, df_features, continuous_numerical_features) df ``` # Feature Analysis of feature data after binning ``` feature_analysis = FeatureAnalysis(df_features, project_sub_dir=inspect_data_project_dir) feature_analysis.perform_analysis(df, dataset_name= "Binned Continuous " + dataset_name, target_features=[df_features.target_feature()], suppress_runtime_errors=True, display_print=False, display_visuals=False, dataframe_snapshot=False, selected_features=continuous_numerical_features, statistical_analysis_on_aggregates=False) ``` # Get entropy table ``` infile = open(feature_analysis.folder_path + "Binned Continuous " + dataset_name + "/_Extras/Statistics/Entropy Table.pkl",'rb') entropy_table = pickle.load(infile) infile.close() entropy_table del feature_analysis ``` # Null Analysis of data after binning ``` null_analysis = NullAnalysis(df_features, project_sub_dir=inspect_data_project_dir, notebook_mode=notebook_mode) null_analysis.feature_analysis_of_null_data(df, "Binned Continuous " + dataset_name, target_features=[df_features.target_feature()], display_visuals=False, display_print=False, save_file=True, selected_features=continuous_numerical_features, suppress_runtime_errors=True, aggregate_target_feature=True, extra_tables=True, nan_features=nan_features, 
statistical_analysis_on_aggregates=False) del null_analysis remove_unconnected_pipeline_segments() ```
github_jupyter
# Dataset tests of Autometacal Proof-of-Concept This is a modified version of the Fourier proof-of-concept notebook that repeats the same steps, side by side with the original nb and an example from the dataset. The database example has: - example['gal_image'] # the observation of the galaxy, convolved with the psf - example['psf_image'] # the image of the psf - example['gal_kimage'] # k space image of observation - example['psf_kimage'] # k space image of psf - example['label'] # g1, g2 of galaxy Noise in the database is gaussian with SNR $200\pm20$ ``` %pylab inline import galsim import os os.environ["CUDA_VISIBLE_DEVICES"]="-1" import sys sys.path.append("..") import autometacal autometacal import tensorflow_datasets as tfds ``` ## Dataset ``` data = tfds.load('gal_gen/small_stamp_100',as_supervised=False,with_info=False) data = data['train'].take(100) datait = data.as_numpy_iterator() datalist=[next(datait) for i in range(100)] example = datalist[0] ``` ## Create Galaxy Model Original: ``` gal_flux = 1.e5 # counts gal_r0 = 1 # arcsec g1 = 0.1 # g2 = 0.2 # pixel_scale = 0.2 # arcsec / pixel img_size = 50 psf_beta = 5 # psf_re = 1.0 # arcsec # Define the galaxy profile. gal = galsim.Exponential(flux=gal_flux, scale_radius=gal_r0) # To make sure that GalSim is not cheating, i.e. using the analytic formula of the light profile # when computing the affine transformation, it might be a good idea to instantiate the image as # an interpolated image. # We also make sure GalSim is using the same kind of interpolation as us (bilinear for TF) gal = galsim.InterpolatedImage(gal.drawImage(nx=img_size,ny=img_size, scale=pixel_scale), x_interpolant='linear') # Shear the galaxy by some value. # There are quite a few ways you can use to specify a shape. 
# q, beta Axis ratio and position angle: q = b/a, 0 < q < 1 # e, beta Ellipticity and position angle: |e| = (1-q^2)/(1+q^2) # g, beta ("Reduced") Shear and position angle: |g| = (1-q)/(1+q) # eta, beta Conformal shear and position angle: eta = ln(1/q) # e1,e2 Ellipticity components: e1 = e cos(2 beta), e2 = e sin(2 beta) # g1,g2 ("Reduced") shear components: g1 = g cos(2 beta), g2 = g sin(2 beta) # eta1,eta2 Conformal shear components: eta1 = eta cos(2 beta), eta2 = eta sin(2 beta) gal0 = gal.shear(g1=g1, g2=g2) psf = galsim.Moffat(beta=psf_beta, flux=1., half_light_radius=psf_re) gal = galsim.Convolve([gal0, psf]) image_original = gal0.original.drawImage(nx=img_size,ny=img_size, scale=pixel_scale, method='no_pixel').array image_shear = gal.drawImage(nx=img_size,ny=img_size,scale=pixel_scale, method='no_pixel', use_true_center=False).array subplot(121) imshow(image_original, origin='lower') title('original galaxy') subplot(122) title('after shear and PSF') imshow(image_shear, origin='lower') ``` Dataset: ``` imshow(example['gal_model']) ``` ## Create simple shear measurements ``` import tensorflow as tf # let's try to do shape measurement using weigthed moments nx = img_size ny = img_size XX=np.zeros((nx,ny)) XY=np.zeros((nx,ny)) YY=np.zeros((nx,ny)) w = np.zeros((nx,ny)) sigma=40 for i in range(0,nx): x=0.5+i-(nx)/2.0 for j in range(0,ny): y=0.5+j-(ny)/2.0 XX[i,j]=x*x XY[i,j]=x*y YY[i,j]=y*y w[i,j]=np.exp(-((x) ** 2 + (y) ** 2) / (2 * sigma ** 2)) def get_ellipticity(img): img = tf.convert_to_tensor(img, dtype=tf.float32) norm = tf.reduce_sum(w*img) Q11 = tf.reduce_sum(w*img*YY)/norm Q12 = tf.reduce_sum(w*img*XY)/norm Q21 = Q12 Q22 = tf.reduce_sum(w*img*XX)/norm q1 = Q11 - Q22 q2 = 2*Q12 T= Q11 + Q22 + 2*tf.sqrt(Q11*Q22 - Q12**2) return q1/T, q2/T get_ellipticity(image_shear+10*randn(img_size,img_size)) imshow(w*(image_shear+10*randn(img_size,img_size))) ``` Dataset: ``` imshow(example['obs_image']) print(get_ellipticity(example['obs_image'])) print("True:", 
example['label']) ``` ## Create kspace images ``` noise = galsim.GaussianNoise().withVariance(5) obs_imag = gal.drawImage(nx=img_size,ny=img_size, scale=pixel_scale, method='no_pixel') noise.applyTo(obs_imag) # Make noise image noise_imag = galsim.Image(img_size,img_size, scale=pixel_scale) noise.applyTo(noise_imag) # Building observed image object obs = galsim.InterpolatedImage(obs_imag) nos = galsim.InterpolatedImage(noise_imag) # We draw the PSF image in Kspace at the correct resolution N = img_size im_scale = pixel_scale interp_factor=2 padding_factor=1 Nk = N*interp_factor*padding_factor from galsim.bounds import _BoundsI bounds = _BoundsI(-Nk//2, Nk//2-1, -Nk//2, Nk//2-1) impsf = psf.drawKImage(bounds=bounds, scale=2.*np.pi/(N*padding_factor* im_scale), recenter=False) ipsf = galsim.Deconvolve(psf) imipsf = ipsf.drawKImage(bounds=bounds, scale=2.*np.pi/(N*padding_factor* im_scale), recenter=False) imgal = obs.drawKImage(bounds=bounds, scale=2.*np.pi/(N*padding_factor* im_scale), recenter=False) imnos = nos.drawKImage(bounds=bounds, scale=2.*np.pi/(N*padding_factor* im_scale), recenter=False) imipsf.array.shape tfimpsf = tf.signal.fftshift(tf.expand_dims(tf.convert_to_tensor(impsf.array, dtype=tf.complex64),0),axes=2)[:,:,:(img_size*interp_factor*padding_factor)//2+1] tfimipsf = tf.expand_dims(tf.convert_to_tensor(imipsf.array, dtype=tf.complex64),0) tfimgal = tf.expand_dims(tf.convert_to_tensor(imgal.array, dtype=tf.complex64),0) tfimnos = tf.expand_dims(tf.convert_to_tensor(imnos.array, dtype=tf.complex64),0) ``` Dataset: ``` psf_kimage = autometacal.datasets.gal_gen.recomplexify(example['psf_kimage']) gal_kimage = autometacal.datasets.gal_gen.recomplexify(example['obs_kimage']) psf_inverse = autometacal.datasets.galaxies.gs_Deconvolve(example['psf_image']) tfimpsf_dataset = tf.convert_to_tensor(psf_kimage, dtype=tf.complex64) #psf k image tfimipsf_dataset = tf.convert_to_tensor(psf_inverse, dtype=tf.complex64)#psf deconv tfimgal_dataset = 
tf.convert_to_tensor(gal_kimage, dtype=tf.complex64)# gal k image tfimnos_dataset = tf.convert_to_tensor(imnos.array, dtype=tf.complex64)# noise image tfimgal_dataset = tf.expand_dims(tfimgal_dataset ,0) #gal k image tfimnos_dataset = tf.expand_dims(tfimnos_dataset ,0) #noise k image tfimpsf_dataset = tf.expand_dims(tfimpsf_dataset ,0) # psf k image tfimipsf_dataset = tf.expand_dims(tfimipsf_dataset ,0) #psf deconv tfimpsf_dataset = tf.signal.fftshift(tfimpsf_dataset,axes=2)[:,:,:(img_size*interp_factor*padding_factor)//2+1] ``` ## Show Conv/Deconv ``` import galflow as gf # Deconvolve image temp = tfimgal * tfimipsf temp = tf.signal.fftshift(temp,axes=2)[:,:,:(img_size*interp_factor*padding_factor)//2+1] # Reconvolve image tst2 = gf.kconvolve(temp, tfimpsf)[...,0] tst2 = tf.expand_dims(tf.signal.fftshift(tst2),-1) tst2 = tf.image.resize_with_crop_or_pad(tst2, img_size, img_size) figure(figsize=[15,5]) subplot(131) title('input image') imshow(image_shear) subplot(132) title('reconvolved image') imshow(tst2[0]); subplot(133) title('residuals image') imshow(((tst2[0,...,0] - image_shear)));# colorbar(); ``` Dataset: ``` # Deconvolve image temp_dataset = tfimgal_dataset * tfimipsf_dataset temp_dataset = tf.signal.fftshift(temp_dataset,axes=2)[:,:,:(img_size*interp_factor*padding_factor)//2+1] # Reconvolve image tst2_dataset = gf.kconvolve(temp_dataset, tfimpsf_dataset)[...,0] tst2_dataset = tf.expand_dims(tf.signal.fftshift(tst2_dataset),-1) tst2_dataset = tf.image.resize_with_crop_or_pad(tst2_dataset, img_size, img_size) figure(figsize=[15,5]) subplot(131) title('input image') imshow(example['obs_image']) subplot(132) title('reconvolved image') imshow(tst2_dataset[0]); subplot(133) title('residuals image') imshow(((tst2_dataset[0,...,0] - example['obs_image'])));# colorbar(); ``` ## Metacal Test ``` @tf.function def metacal_shear(gal_img, #gal_kimage nos_img, #noise_kimage inv_psf_img, psf_img, g1, g2): g1 = tf.reshape(tf.convert_to_tensor(g1, dtype=tf.float32), 
[-1]) g2 = tf.reshape(tf.convert_to_tensor(g2, dtype=tf.float32), [-1]) # Step1: remove observed psf img = gal_img * inv_psf_img imgn = nos_img * inv_psf_img # Step2: add shear layer img = gf.shear(tf.expand_dims(img,-1), g1, g2)[...,0] imgn = gf.shear(tf.expand_dims(imgn,-1), -g1, -g2) imgn=tf.image.rot90(imgn,-1)[...,0] # Step3: apply psf again img = gf.kconvolve(tf.signal.fftshift(img,axes=2)[...,:(len(img[0]))//2+1], (psf_img))[...,0] img = tf.expand_dims(tf.signal.fftshift(img),-1) img = tf.image.resize_with_crop_or_pad(img, img_size, img_size) imgn = gf.kconvolve(tf.signal.fftshift(imgn,axes=2)[...,:(len(imgn[0]))//2+1], (psf_img))[...,0] imgn = tf.expand_dims(tf.signal.fftshift(imgn),-1) imgn = tf.image.resize_with_crop_or_pad(imgn, img_size, img_size) # Adding the inversed sheared noise img += imgn # Step4: compute ellipticity return img, tf.stack(get_ellipticity(img[0,:,:,0] )) i, res = metacal_shear(tfimgal_dataset, tfimnos, tfimipsf_dataset, tfimpsf_dataset, 0.05, 0.05) print("measured shape", res.numpy()) figure(figsize=[10,5]) subplot(131) title('input image') imshow(example['obs_image']) subplot(132) title('metacal image') imshow(i[0,:,:,0]) subplot(133) title('residuals') imshow(example['obs_image']-i[0,:,:,0]) res.numpy() # True ellipticity: [0.1, 0.2] @tf.function def get_metacal_response(tfimgal, tfimnos, tfimipsf, tfimpsf): g = tf.zeros(2) with tf.GradientTape() as tape: tape.watch(g) # Measure ellipticity under metacal _, e = metacal_shear(tfimgal, tfimnos, tfimipsf, tfimpsf, g[0], g[1]) # Compute response matrix R = tape.jacobian(e, g) return e, R e,R = get_metacal_response(tfimgal, tfimnos, tfimipsf, tfimpsf) # Apply inverse response matrix :-D calibrated_e = tf.linalg.inv(R) @ tf.reshape(e,[2,1]) print("measured ellipticity ", e.numpy()) print("calibrated ellipticity", calibrated_e.numpy().squeeze()) print("true g1,g2 ",g1,g2) @tf.function def generate_mcal_image2(gal_image, psf_image, gal_kimage, psf_kimage, psf_inverse, noise_image, g): """ 
Generate a metacalibrated image. """ g1, g2 = g[0], g[1] g1 = tf.reshape(tf.convert_to_tensor(g1, dtype=tf.float32), [-1]) g2 = tf.reshape(tf.convert_to_tensor(g2, dtype=tf.float32), [-1]) #sizes img_size = len(gal_image) kmg_size = len(gal_kimage) ### tensorflow preparation ops #galaxy k image tf_gal_img = tf.expand_dims(tf.convert_to_tensor(gal_kimage, dtype=tf.complex64),0) #psf k image tf_psf_img = tf.signal.fftshift(tf.expand_dims(tf.convert_to_tensor(psf_kimage, dtype=tf.complex64),0),axes=2)[:,:,:(kmg_size)//2+1] #psf deconvolution kernel tf_inv_psf_img = tf.expand_dims(tf.convert_to_tensor(psf_inverse, dtype=tf.complex64),0) #noise k image tf_nos_img = tf.expand_dims(tf.convert_to_tensor(noise_image, dtype=tf.complex64),0) ### metacal procedure # Step1: remove observed psf img = tf_gal_img * tf_inv_psf_img imgn = tf_nos_img * tf_inv_psf_img # Step2: add shear layer img = gf.shear(tf.expand_dims(img,-1), g1, g2)[...,0] imgn = gf.shear(tf.expand_dims(imgn,-1), -g1, -g2) imgn=tf.image.rot90(imgn,-1)[...,0] # Step3: apply psf again img = gf.kconvolve(tf.signal.fftshift(img,axes=2)[...,:(len(img[0]))//2+1], (tf_psf_img))[...,0] img = tf.expand_dims(tf.signal.fftshift(img),-1) img = tf.image.resize_with_crop_or_pad(img, img_size, img_size) imgn = gf.kconvolve(tf.signal.fftshift(imgn,axes=2)[...,:(len(imgn[0]))//2+1], (tf_psf_img))[...,0] imgn = tf.expand_dims(tf.signal.fftshift(imgn),-1) imgn = tf.image.resize_with_crop_or_pad(imgn, img_size, img_size) # Adding the inverse sheared noise img += imgn return img psf_kimage = autometacal.datasets.gal_gen.recomplexify(example['psf_kimage']) gal_kimage = autometacal.datasets.gal_gen.recomplexify(example['obs_kimage']) psf_inverse = autometacal.datasets.galaxies.gs_Deconvolve(example['psf_image']) test=autometacal.metacal.generate_mcal_image(example['obs_image'], example['psf_image'], gal_kimage, psf_kimage, psf_inverse, imnos.array.astype('complex64'), [0,0]) plt.imshow(test.numpy()[0,...,0]) ```
github_jupyter
# Digit Classification using Naive Bayes, Random Forest and SVM
### - Yash Pasar
#### This project deals with predicting handwritten digits by building classifiers using Naive Bayes, Random Forest and Support Vector Machine algorithms. The dataset has class labels from 0-9 and a feature set of pixel values for the handwritten digits. Our aim is to build a classifier using the pixel values to predict the label for the digit
### Importing all the necessary packages
```
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt, matplotlib.image as mpimg
import os
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
```
### Reading Sample Train data and Test data
```
train_data = pd.read_csv('/Users/yashpasar/Downloads/Kaggle-digit-train-sample-small-1400.csv')
test_data = pd.read_csv('/Users/yashpasar/Downloads/Kaggle-digit-test-sample1000.csv')
train_data.head()
test_data.head()
```
### Check for null values
```
print('Columns with null values:' ,sum(list(train_data.isnull().any())))
print('Columns with null values:' ,sum(list(test_data.isnull().any())))
```
### Removing null values from the test data
```
test_data = test_data.fillna(0)
```
### Splitting data into Train and Test
```
X_train = train_data.drop('label', axis=1)
y_train = train_data.label
X_test = test_data.drop('label', axis=1)
y_test = test_data.label
label = sorted(y_train.unique())
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
```
### Visualizing the data to be classified
```
plt.figure(figsize=(9,9))
for i in label:
    plt.subplot(1,10, i+1)
    img = np.array( X_train[y_train==i][1:2] ).reshape(28,28)
plt.imshow(img) ``` ## Naive Bayes Gaussian Model ``` gnb = GaussianNB() scores = cross_val_score(gnb, X_train, y_train, cv=5) print(f"Accuracy: {100*scores.mean()}%") gnb.fit(X_train, y_train) gnb.predict(X_test) ``` ## Support Vector Machine Model ### Preprocessing the data for SVM be applying the Standard Scaler function ``` scaler = StandardScaler() scaler.fit(X_train) X_train_std = scaler.transform(X_train) X_test_std = scaler.transform(X_test) clf = svm.SVC() scores = cross_val_score(clf, X_train_std, y_train, cv=5) print(f"Accuracy: {100*scores.mean()}%") ``` ### Parameter Tuning for SVM Model ``` param_grid = {'kernel' : ['rbf', 'linear'], 'gamma' : [1, 0.1, 0.01, 0.001], 'C' : [0.25,0.5,0.75,1]} grid = GridSearchCV(clf, param_grid, cv=3, scoring='accuracy') grid.fit(X_train_std, y_train) C=grid.best_params_['C'] kernel = grid.best_params_['kernel'] gamma=grid.best_params_['gamma'] clf=svm.SVC(kernel=kernel, C=C,gamma=gamma, random_state=42) scores = cross_val_score(clf, X_train_std, y_train, cv=5) print(f"Accuracy: {100*scores.mean()}%") clf.fit(X_train_std, y_train) clf.predict(X_test_std) ``` ## Random Forest Model ``` model = RandomForestClassifier() scores = cross_val_score(model, X_train, y_train, cv=5) print(f"Accuracy: {100*scores.mean()}%") ``` ### Parameter Tuning for Random Forest Model ``` param_grid = {'n_estimators' : [25,50,75,100,300]} grid = GridSearchCV(model, param_grid, cv=3, scoring='accuracy') grid.fit(X_train, y_train) n_estimators=grid.best_params_['n_estimators'] model = RandomForestClassifier(n_estimators=n_estimators, min_samples_split=2, min_samples_leaf=1) scores = cross_val_score(model, X_train, y_train, cv=5) print(f"Accuracy: {100*scores.mean()}%") model.fit(X_train, y_train) model.predict(X_test) ``` ## Boxplots showing the spread of the accuracy scores across each cross validation fold for each algorithm. 
``` models=[] time_taken=[] models.append(('NB', gnb)) models.append(('SVM', clf)) models.append(('RF', model)) results = [] names = [] scoring = 'accuracy' for name, model in models: if models == 'NB' or models == 'RF': start = time.time() cv_results = cross_val_score(model, X_train, y_train, cv=5, scoring=scoring) end = time.time() time_taken = end - start else: start = time.time() cv_results = cross_val_score(model, X_train_std, y_train, cv=5, scoring=scoring) end = time.time() time_taken = end - start results.append(cv_results) names.append(name) msg = "%s: %f, %f" % (name, (cv_results.mean()*100),time_taken) print(msg) ``` ## Boxplot algorithm comparison ``` fig = plt.figure(figsize=(12, 10)) fig.suptitle('Algorithm Comparison') ax = fig.add_subplot(111) sns.boxplot(data=results, palette="Set2") ax.set_xticklabels(names) plt.show() ``` ## Takeaway: ### - As seen from the above comparison, it is clear that implementing Random Forest gives us the best accuracy (91.12%) among all 3 models. While it provides us with the best accuracy, it also consumes the most time (6.76 secs). The reason being that it has the most number of parameters to be tuned. ### - SVM takes 5.96 secs and produces results with 87.57% of accuracy. It took lesser time than Random Forest becuase it has less parameters to be tuned. ### -Naive Bayes takes the least amount of the time to run but it also provides us with a low accuracy rate. It works on the assumption of independence of variables. When the assumption is violated, it might not perform well. This might be reason that Naive Bayes performance in this problem is poor if compared to Random Forest Classifier and SVM. ### -It is also seen that naive bayes model overfits the data as the variance in the cross validation accuracies is large for naive bayes.
github_jupyter
``` # https://raw.githubusercontent.com/fchollet/keras/master/examples/lstm_text_generation.py from __future__ import absolute_import from __future__ import division from __future__ import print_function # tensorflow import tensorflow as tf import tensorflow.contrib.rnn as rnn import tensorflow.contrib.learn as tflearn import tensorflow.contrib.layers as tflayers # keras from tensorflow.contrib.keras.python.keras.layers import Dense, LSTM, GRU, Activation from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file # input data from tensorflow.examples.tutorials.mnist import input_data # estimators from tensorflow.contrib import learn # estimator "builder" from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib # helpers import numpy as np import random import sys # enable logs tf.logging.set_verbosity(tf.logging.INFO) def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) # THE MODEL def model_fn(features, targets, mode, params): """Model function for Estimator.""" # 1. Configure the model via TensorFlow operations # First, build all the model, a good idea is using Keras or tf.layers # since these are high-level API's #lstm = GRU(128, input_shape=(params["maxlen"], params["vocab_size"]))(features) #preds = Dense(params["vocab_size"], activation='sigmoid')(lstm) # 0. Reformat input shape to become a sequence lstm1 = GRU(128, input_shape=(params["maxlen"], params["vocab_size"]), return_sequences=False)(features) #lstm2 = GRU(128)(lstm1) preds = Dense(params["vocab_size"])(lstm1) preds_softmax = Activation("softmax")(preds) # 2. 
Define the loss function for training/evaluation loss = None train_op = None # Calculate Loss (for both TRAIN and EVAL modes) if mode != learn.ModeKeys.PREDICT: loss = tf.losses.softmax_cross_entropy( onehot_labels=targets, logits=preds) # 3. Define the training operation/optimizer # Configure the Training Op (for TRAIN mode) if mode == learn.ModeKeys.TRAIN: train_op = tf.contrib.layers.optimize_loss( loss=loss, global_step=tf.contrib.framework.get_global_step(), learning_rate=params["learning_rate"], optimizer="RMSProp", ) # 4. Generate predictions predictions_dict = { "preds": preds_softmax } # 5. Define how you want to evaluate the model metrics = { "accuracy": tf.metrics.accuracy(tf.argmax(input=preds_softmax, axis=1), tf.argmax(input=targets, axis=1)) } # 6. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object return model_fn_lib.ModelFnOps( mode=mode, predictions=predictions_dict, loss=loss, train_op=train_op, eval_metric_ops=metrics) print('Getting data') #path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt') path = 'shakespeare.txt' text = open(path).read().lower() print('corpus length:', len(text)) chars = sorted(list(set(text))) print('total chars:', len(chars)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) # cut the text in semi-redundant sequences of maxlen characters maxlen = 40 step = 1 sentences = [] next_chars = [] for i in range(0, len(text) - maxlen, step): sentences.append(text[i: i + maxlen]) next_chars.append(text[i + maxlen]) print('nb sequences:', len(sentences)) print('Vectorization...') X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.float32) y = np.zeros((len(sentences), len(chars)), dtype=np.float32) for i, sentence in enumerate(sentences): for t, char in enumerate(sentence): X[i, t, char_indices[char]] = 1 y[i, char_indices[next_chars[i]]] = 1 print(X[0]) ```
github_jupyter
## Model 1: Policy simulation

The objective of this model-based simulation is to analyse the impact of policy, technology, and commodity changes on consumer price inflation in selected countries. The simulation environment is learnt from real data, after which simulations using synthetic data are used to do policy analysis by manipulating a number of selected variables such as government debt, cellular subscription, GDP growth, and real interest rates in the synthetic data.

A secondary purpose of the simulation model is to identify and map the interactions between world-level and country-level indicator variables.

#### Features
------------
Multivariate human and technological development indicator timeseries
1. aggregated across nations using hand-crafted rules.
2. raw, collected on a per-country level.

#### Labels
----------
Consumer price inflation levels for the following countries:
* Singapore
* Switzerland
* Netherlands
* Japan
* France
* United States
* China
* India
* Brazil
* Colombia
* Indonesia
* Senegal
* Ghana

#### Training
------------
Training is done on a per-feature, single-country basis.
### Load and prepare the data ``` import warnings import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import tensorflow_probability as tfp import matplotlib.pyplot as plt from tensorflow import keras %matplotlib inline warnings.filterwarnings('ignore') pd.options.display.float_format = '{:20,.4f}'.format sns.set_style("whitegrid") sns.set_palette("colorblind") country = 'Colombia' country_labels = ['Brazil', 'China', 'Colombia', 'France', 'Ghana', 'India', 'Indonesia', 'Japan', 'Netherlands', 'Senegal', 'Singapore', 'Switzerland', 'United States'] assert country in country_labels ``` #### Load and combine the features and labels ``` features_df = pd.read_csv('features/m_one/world_aggregate.csv', sep=';', header=0) labels_df = pd.read_csv('features/m_one/labels_interpolated.csv', sep=';', header=0) features_df.head() labels_df.head() combined_df = pd.concat([features_df, labels_df.drop(columns=['date'])], axis=1) combined_df.head() fig, ax = plt.subplots(figsize=(15,7)) [sns.lineplot(x='date', y=c, markers=True, ax=ax, label=c, data=combined_df) for c in list(filter(lambda x: x not in ['Brazil', 'Indonesia', 'Ghana'], country_labels))] xticks=ax.xaxis.get_major_ticks() for i in range(len(xticks)): if i % 12 == 1: xticks[i].set_visible(True) else: xticks[i].set_visible(False) ax.set_xticklabels(combined_df['date'], rotation=45); combined_df.columns ``` ### Prepare the country features ``` base_feature_df = combined_df[['bank capital to assets ratio', 'bank nonperforming loans', 'cereal yield', 'energy imports', 'food exports', 'high-tech exports', 'inflation', 'lending interest rate', 'life expectancy', 'population density', 'real interest rate', 'broad money', 'exports of goods and services', 'gross domestic savings', 'high-tech value added', 'household consumption expenditure', 'imports of goods and services', 'listed companies', 'manufacturing value added', 'r and d spend', 'services trade', 'trade', 'government debt service', 
'government interest payments external debt', 'government tax revenue', 'birth deaths', 'broadband subscriptions', 'electricity access', 'co2 emissions', 'electricity consumption', 'mobile subscriptions', 'newborns', 'overweight', 'rural population', 'unemployed', 'urban population', 'workers', country]] base_feature_df.to_csv('features/m_one/combined_%s.csv' % country.lower(), sep=',', index=False) base_feature_df['label'] = base_feature_df[country].shift(periods=1) base_df = base_feature_df.drop(country, axis=1).fillna(0.00); num_obs = len(base_df) num_cols = len(base_df.columns) num_features = len(base_df.columns) - 1 ``` ### Model iterations --------------------- ### Exploration 1 **Multivariate LSTM** fitted on the real data, see https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/ - Activation function: Leaky ReLU. - Loss function: mean squared error. - Optimizer: adam. - Num observations source dataset: 684 (using lagshift, 1960-2016 inclusive monthly) - Num sequences (@ sequence length 6): 116. 
- Batch size: 4-8 sequences (although `size=48` would lead to more stable training) ``` from keras import Sequential from keras.layers import LSTM, Dense, LeakyReLU from keras.optimizers import Adam from sklearn.metrics import mean_squared_error lstm_params = { 'sequence_length': 4, 'batch_size': 8, 'num_epochs': 600, 'num_units': 128, 'lrelu_alpha': 0.3 } ``` #### LSTM features ``` features = [] labels = [] for i in range(int(num_obs / lstm_params['sequence_length'])): labels_df = base_df['label'] labels.append(labels_df[i:(i+lstm_params['sequence_length'])].values[-1:]) features.append(base_df[i:(i+lstm_params['sequence_length'])].values) lstm_train_X = np.asarray(features[0:100]) lstm_train_X = lstm_train_X.reshape((lstm_train_X.shape[0], lstm_params['sequence_length'], num_cols)) lstm_train_y = np.asarray(labels[0:100]) lstm_train_y = lstm_train_y.reshape((lstm_train_y.shape[0])) lstm_test_X = np.asarray(features[100:]) lstm_test_X = lstm_test_X.reshape((lstm_test_X.shape[0], lstm_params['sequence_length'], num_cols)) lstm_test_y = np.asarray(labels[100:]) lstm_test_y = lstm_test_y.reshape((lstm_test_y.shape[0])) X = np.asarray(features) X = X.reshape((X.shape[0], lstm_params['sequence_length'], num_cols)) y = np.asarray(labels) y = y.reshape((y.shape[0], 1)) print('X: %s, y: %s' % (X.shape, y.shape)) ``` #### Model: LSTM ``` model = Sequential() model.add(LSTM(lstm_params['num_units'], input_shape=(lstm_params['sequence_length'], num_cols))) model.add(Dense(1, activation=LeakyReLU(alpha=lstm_params['lrelu_alpha']))) model.compile(loss='mse', optimizer='adam') model.summary() train_run = model.fit(lstm_train_X, lstm_train_y, epochs=lstm_params['num_epochs'], batch_size=lstm_params['batch_size']) plt.plot(train_run.history['loss'], label='train') plt.legend() plt.show() ``` ##### Evaluate model performance ``` model.evaluate(lstm_test_X, lstm_test_y) yhat = model.predict(lstm_test_X) plt.figure(figsize=(15,7)) plt.plot(lstm_test_y, label='observed') 
plt.plot(yhat, label='predicted') plt.legend() plt.title('Observed versus predicted values for consumer price inflation in %s' % country) plt.show() print('rmse: %s\nmean observed: %s\nmean predicted: %s' % (np.sqrt(mean_squared_error(lstm_test_y, yhat)), np.mean(lstm_test_y), np.mean(yhat))) ``` ## Exploration 2 -------------------- **GAN** to generate training data, **LSTM** trained on generated data validated on the real data. ### Conditional GAN for policy-constrained timeseries generation See https://arxiv.org/pdf/1706.02633.pdf. ``` from keras.models import Sequential, Model from keras.layers import Input from keras.optimizers import Adam from sklearn.metrics import mean_squared_error gan_df = base_df gan_df.shape gan_cols = gan_df.shape[1] gan_params = { 'num_epochs': 1500, 'save_interval': 100, 'sequence_length': 6, 'num_variables': gan_cols, 'batch_size': 64, 'lr': 0.0001 } generator_params = { 'noise_sigma': 0.3, 'lstm_units': 128, 'lstm_dropout': 0.4, 'gru_units': 64, 'lr': 0.0001 } discriminator_params = { 'bi_lstm_units': 256, 'dropout_rate': 0.4, 'lr': 0.0001 } ``` #### GAN input sequences The collated World Bank and IMF data used as input for the data generator and to validate the model trained on generated data. 
``` gan_features = [] gan_labels = [] for i in range(int(num_obs / gan_params['sequence_length'])): gan_labels_df = gan_df['label'] gan_labels.append(gan_labels_df[i:(i+gan_params['sequence_length'])].values[-1:]) gan_features.append(gan_df[i:(i+gan_params['sequence_length'])].values) real = np.asarray(gan_features) real = real.reshape((real.shape[0], gan_params['sequence_length'], gan_cols)) real.shape ``` #### Generator ``` from keras.layers import GaussianNoise, LSTM, Dropout, BatchNormalization, Dense, LocallyConnected2D, GRU, Reshape def build_encoder(params): gshape = params['sequence_length'], params['num_variables'] inputs = Input(shape=(gshape)) e = Sequential(name='encoder') e.add(LSTM(params['lstm_units'], input_shape=(gshape), return_sequences=True)) e.add(Dropout(params['lstm_dropout'])) e.add(GaussianNoise(stddev=params['noise_sigma'])) e.add(BatchNormalization(axis=2, momentum=0.8, epsilon=0.01)) e.add(Dense(params['num_variables'], activation='relu')) e.summary() return Model(inputs, e(inputs)) encoder = build_encoder({**gan_params, **generator_params}) def build_generator(params): gshape = params['sequence_length'], params['num_variables'] inputs = Input(shape=(gshape)) g = Sequential(name='generator') g.add(GRU(params['gru_units'], input_shape=(gshape), return_sequences=True)) g.add(Dense(params['num_variables'], activation='softmax')) g.add(Reshape(target_shape=(gshape))) g.summary() return Model(inputs, g(inputs)) generator = build_generator({**gan_params, **generator_params}) ``` #### Discriminator ``` from keras.layers import Bidirectional, LSTM, Dense, concatenate, Flatten def build_discriminator(params): dshape = params['sequence_length'], params['num_variables'] batch_shape = params['batch_size'], params['sequence_length'], params['num_variables'] real = Input(shape=(dshape)) generated = Input(shape=(dshape)) inputs = concatenate([generated, real], axis=1) d = Sequential(name='discriminator') 
d.add(Bidirectional(LSTM(params['bi_lstm_units']), batch_input_shape=(batch_shape))) d.add(Dropout(params['dropout_rate'])) d.add(Dense(1, activation='sigmoid')) d.summary() return Model([generated, real], d(inputs)) discriminator = build_discriminator({**gan_params, **discriminator_params}) discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=discriminator_params['lr']), metrics=['accuracy']) ``` #### GAN Bidirectional generative adversarial network, viz https://arxiv.org/abs/1605.09782. ``` def build_gan(encoder, generator, discriminator, params): ganshape = params['sequence_length'], params['num_variables'] discriminator.trainable = False noise = Input(shape=(ganshape)) generated = generator(noise) data = Input(shape=(ganshape)) encoded = encoder(data) fake = discriminator([noise, generated]) real = discriminator([encoded, data]) gan = Model([noise, data], [fake, real], name='gan') gan.summary() return gan gan = build_gan(encoder, generator, discriminator, gan_params) gan.compile(loss=['kullback_leibler_divergence', 'kullback_leibler_divergence'], optimizer=Adam(lr=generator_params['lr']), metrics=['mse', 'mse']) %%time def train_gan(real, batch_size, params): g_metrics = [] d_real_metrics = [] d_synth_metrics = [] reals = np.ones(batch_size) synths = np.zeros(batch_size) for i in range(params['num_epochs']): # create input of real and synthetic data random_index = np.random.randint(0, len(real) - batch_size) half_real = real[random_index:int(random_index + batch_size)] half_synth = np.random.normal(-1.0, 1.0, size=[batch_size, params['sequence_length'], real.shape[2]]) # apply generator and encoder generated = generator.predict(half_synth) encoded = encoder.predict(half_real) # train discriminator d_real = discriminator.train_on_batch([encoded, half_real], reals) d_synth = discriminator.train_on_batch([half_synth, generated], synths) # train gan gen_ = gan.train_on_batch([half_synth, half_real], [reals, synths]) if i % 100 == 0: print('Epoch %s 
losses: discriminator real: %.4f%%, discriminator synth: %.4f%%, generator: %.4f%%' % (i, d_real[0], d_synth[0], gen_[0])) d_real_metrics.append(d_real) d_synth_metrics.append(d_synth) g_metrics.append(gen_) return d_real_metrics, d_synth_metrics, g_metrics d_r_metrics, d_s_metrics, g_metrics = train_gan(real, gan_params['batch_size'], gan_params) plt.figure(figsize=(15,7)) plt.plot([metrics[0] for metrics in d_r_metrics], label='discriminator loss on reals') plt.plot([metrics[0] for metrics in d_s_metrics], label='discriminator loss on synths') plt.plot([metrics[0] for metrics in g_metrics], label='generator loss') plt.legend() plt.title('GAN losses') plt.show() plt.figure(figsize=(15,7)) plt.plot([metrics[1] for metrics in d_r_metrics], label='discriminator accuracy reals') plt.plot([metrics[1] for metrics in d_s_metrics], label='discriminator accuracy synths') plt.plot([metrics[1] for metrics in g_metrics], label='generator mean average error') plt.legend() plt.title('GAN performance metrics') plt.show() generated_y = generator.predict(np.random.rand(num_obs, gan_params['sequence_length'], gan_cols))[:,-1,-1] gan_y = gan_df['label'].values plt.figure(figsize=(15,7)) plt.plot(gan_y, label='observed cpi') plt.plot(generated_y, label='gan-generated cpi') plt.legend() plt.title('Observed versus GAN-generated values for consumer price inflation in %s' % country) plt.show() print('rmse: %s\nmean observed: %s\nmean generated: %s' % (np.sqrt(mean_squared_error(gan_y, generated_y)), np.mean(gan_y), np.mean(generated_y))) ``` ## Simulation 1 Question: what happens to consumer price inflation in the long run if the government decides to borrow more money? 
##### Simulation parameters - central government debt - time horizon ##### Environment variables - world economy: selected generated macroeconomic indicators - country economy: selected generated country-level indicators - hybrid: interaction country x world ## Exploration 3 -------------------- **Sequence transformer network** to generate training data, **LSTM** trained on generated data validated on the real data. See https://arxiv.org/abs/1808.06725
github_jupyter
<a href="https://colab.research.google.com/github/camminady/sPYnning/blob/master/visworld_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !pip install randomcolor import randomcolor # see: https://pypi.org/project/randomcolor/ !pip install gif import gif # see https://github.com/maxhumber/gif !pip install reverse_geocoder import reverse_geocoder as rg # see ttps://pypi.org/project/reverse_geocoder/ import numpy as np # plotting import matplotlib import matplotlib.pyplot as plt from matplotlib import cm, colors # 3d !sudo apt-get install libgeos-dev !sudo pip3 install https://github.com/matplotlib/basemap/archive/master.zip from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection # everything below is used to color the globe from mpl_toolkits.basemap import Basemap import json import requests from numpy import loadtxt, degrees, arcsin, arctan2, sort, unique from mpl_toolkits.basemap import Basemap import reverse_geocoder as rg import randomcolor def domino(lol): # Takes a list (length n) of lists (length 2) # and returns a list of indices order, # such that lol[order[i]] and lol[order[i+1]] # have at least one element in common. # If that is not possible, multiple # domino chains will be created. # This works in a greedy way. 
n = len(lol) order = [0] # Greedy link = lol[0][-1] links = [lol[0][0],lol[0][1]] while len(order)<n: for i in [j for j in range(n) if not j in order]: if link in lol[i]: # They connect order.append(i) # Save the id of the "stone" link = lol[i][0] if not(lol[i][0]==link) else lol[i][1] # The new link is the other element links.append(link) break return order,links[:-1] def getpatches(color,quadrature): xyz,neighbours,triangles = quadrature["xyz"], quadrature["neighbours"], quadrature["triangles"] nq = len(color) patches = [] for center in range(nq): lol = [] # list of lists for i in neighbours[center,:]: if i>-1: lol.append(list(sort(triangles[i,triangles[i,:] != center]))) order,links = domino(lol) neighx = [xyz[j,0] for j in links] neighy = [xyz[j,1] for j in links] neighz = [xyz[j,2] for j in links] # Get the actual hexagon that surrounds a center point x = [] y = [] z = [] for i in range(len(order)): x.append((xyz[center,0]+neighx[i]) / 2) x.append((xyz[center,0]+neighx[i]+neighx[(i+1)%len(order)])/3) y.append((xyz[center,1]+neighy[i]) / 2) y.append((xyz[center,1]+neighy[i]+neighy[(i+1)%len(order)])/3) z.append((xyz[center,2]+neighz[i]) / 2) z.append((xyz[center,2]+neighz[i]+neighz[(i+1)%len(order)])/3) verts = [list(zip(x,y,z))] patches.append(verts[0]) return patches def getquadrature(nq): prefix ="https://raw.githubusercontent.com/camminady/sPYnning/master/" quadrature = {} quadrature["nq"] = nq quadrature["xyz"] = loadtxt(f"{prefix}quadrature/{nq}/points.txt") quadrature["weights"] = loadtxt(f"{prefix}quadrature/{nq}/weights.txt") quadrature["neighbours"] = loadtxt(f"{prefix}quadrature/{nq}/neighbours.txt",dtype=int)-1 # julia starts at 1 quadrature["triangles"] = loadtxt(f"{prefix}quadrature/{nq}/triangles.txt",dtype=int)-1 # julia starts at 1 # Also convert to latitute, longitude quadrature["lat"] = degrees(arcsin(quadrature["xyz"][:,2]/1)) quadrature["lon"] = degrees(arctan2(quadrature["xyz"][:,1], quadrature["xyz"][:,0])) return quadrature def 
def color_land(quadrature):
    """Return one color per quadrature point: green for land, blue for ocean.

    Land/ocean classification is done with Basemap's ``is_land`` lookup.
    """
    bm = Basemap()
    point_colors = []
    for ypt, xpt in zip(quadrature["lat"], quadrature["lon"]):
        land = bm.is_land(xpt, ypt)
        point_colors.append("tab:green" if land else "tab:blue")
    return point_colors

def color_country(quadrature):
    """Return one color per quadrature point, unique per country; ocean stays blue.

    Uses reverse_geocoder to map each (lat, lon) to a country code, then
    assigns each country a random dark color.
    """
    # default mode = 2 (multiprocessing batch lookup)
    results = rg.search([(la, lo) for la, lo in zip(quadrature["lat"], quadrature["lon"])])
    countries = [results[i]["cc"] for i in range(len(results))]
    nunique = len(unique(countries))
    raco = randomcolor.RandomColor()
    # options: https://github.com/kevinwuhoo/randomcolor-py
    randomcolors = raco.generate(luminosity="dark", count=nunique)
    colordict = dict(zip(unique(countries), randomcolors))
    colorland = color_land(quadrature)  # so we can color the ocean also in "tab:blue"
    colorcountries = [colordict[country] if colorland[i] != "tab:blue" else "tab:blue"
                      for i, country in enumerate(countries)]
    return colorcountries

@gif.frame
def myplot(color, quadrature, filename, angle=30):
    """Render the colored hexagon sphere to ``filename`` at view azimuth ``angle``."""
    patches = getpatches(color, quadrature)  # Get the hexagons
    fig = plt.figure(figsize=plt.figaspect(1) * 2, constrained_layout=False)
    # BUG FIX: fig.gca(projection='3d') was deprecated in matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to create 3d axes.
    ax = fig.add_subplot(projection='3d')
    # Visualize each hexagon that is given in "color". A color is computed
    # for the center of the hexagon and then applied for the full hexagon.
    ax.add_collection3d(Poly3DCollection(patches, facecolor=color, linewidth=0.1, edgecolor=color))
    # Some styling
    l = 0.6
    plt.axis("off")
    ax.set_xlim([-l, l]), ax.set_ylim([-l, l]), ax.set_zlim([-l, l])
    ax.set_xticks([]), ax.set_yticks([]), ax.set_zticks([])
    # NOTE(review): w_xaxis/w_yaxis/w_zaxis were deprecated and removed in
    # matplotlib 3.8; on newer versions use ax.xaxis.set_pane_color instead --
    # confirm the installed matplotlib before changing.
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    for spine in ax.spines.values():
        spine.set_visible(False)
    plt.tight_layout()
    ax.view_init(30, angle)
    fig.savefig(filename)

# pick the number of cells on the globe from this list
# [92, 492, 1212, 2252, 3612, 5292, 7292, 9612, 12252, 15212]
nq = 2252
quadrature = getquadrature(nq)

# plot the earth
# (renamed from `colors` to avoid shadowing the matplotlib.colors import)
cell_colors = color_land(quadrature)
myplot(cell_colors, quadrature, "earth.png")

# higher resolution to plot countries
nq = 7292
quadrature = getquadrature(nq)
cell_colors = color_country(quadrature)
myplot(cell_colors, quadrature, "earth_country.png")

# creating a gif
nq = 7292
quadrature = getquadrature(nq)
cell_colors = color_land(quadrature)
frames = []
nframes = 20  # the more, the slower
for i, angle in enumerate(np.linspace(0, 360, nframes)[:-1]):
    print(i, end=",")
    frames.append(myplot(cell_colors, quadrature, "tmp.png", angle=angle))
gif.save(frames, "spinning_earth.gif")
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/srtm.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/srtm.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Terrain/srtm.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/srtm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time. ``` # %%capture # !pip install earthengine-api # !pip install geehydro ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. 
Uncomment the line `ee.Authenticate()` if you are running this notebook for the first time or if you are getting an authentication error. ``` # ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. ``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ``` image = ee.Image('srtm90_v4') # path = image.getDownloadUrl({ # 'scale': 30, # 'crs': 'EPSG:4326', # 'region': '[[-120, 35], [-119, 35], [-119, 34], [-120, 34]]' # }) vis_params = {'min': 0, 'max': 3000} Map.addLayer(image, vis_params, 'SRTM') ``` ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
github_jupyter
## Image classification vs object detection vs image segmentation

<img src="https://media.discordapp.net/attachments/763819251249184789/857822034045567016/image.png">
<br><br>

## Image annotation: assigning labels
<br>

## Popular datasets: ImageNet, COCO, Google Open Images

## TensorFlow Hub has pre-trained models

## Sliding-window approach for object detection (Single Shot MultiBox Detector)
- ### Keep sliding a smaller window over the test image until a match is found
- ### Trial and error to find the right window size
- ### Cons: too much computation & the bounding-box shape may not be accurate

## *Therefore there are faster algorithms:*

## R-CNN (Region-based CNN) --> Fast R-CNN --> Faster R-CNN --> YOLO (You Only Look Once)

# YOLO

<img src="https://media.discordapp.net/attachments/763819251249184789/857843667619676190/image.png?width=1845&height=1182" width=700>
<br>

- ### Divide an image into multiple grid cells (usually 19 x 19)
- ### An object belongs to a specific cell only when the center coordinates of its box lie in that cell
- ### Eliminate redundant bounding boxes using IoU (Intersection over Union: overlapping area) to keep the most probable one
- ### Repeat step 3 until only 1 bounding box is left (non-max suppression)
- ### If multiple objects belong to one cell, we concatenate anchor boxes together (2 vectors of size 8 --> 1 vector of size 16)
<br>

### X_train: <br> image<br><br>

### y_train:

| Name | Explanation |
|:---:|:---:|
| P | Probability of an object in the image |
| Bx | Center of box X coord |
| By | Center of box Y coord |
| Bw | Width |
| Bh | Height |
| C1 | Does the object belong to class 1? |
| C2 | Does the object belong to class 2?
| ``` # A simple YOLO v5 demo import cv2 import pathlib import numpy as np from PIL import Image from yolov5 import YOLOv5 from matplotlib import pyplot as plt %matplotlib inline # set model params model_path = f"{os.path.dirname(os.path.abspath('__file__'))}/yolov5/weights/yolov5s.pt" # it automatically downloads yolov5s model to given path device = "cuda" # or "cpu" # init yolov5 model yolov5 = YOLOv5(model_path, device) # load images image1 = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' image2 = 'https://github.com/ultralytics/yolov5/blob/master/data/images/bus.jpg' # perform inference results = yolov5.predict(image1) # perform inference with larger input size results = yolov5.predict(image1, size=1280) # perform inference with test time augmentation results = yolov5.predict(image1, augment=True) # perform inference on multiple images # results = yolov5.predict([image1, image2], size=1280, augment=True) # parse results predictions = results.pred[0] boxes = predictions[:, :4] # x1, x2, y1, y2 scores = predictions[:, 4] categories = predictions[:, 5] # show detection bounding boxes on image # plt.imshow(np.reshape(results.imgs, (720, 1280, 3))), results.pred def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): # Plots one bounding box on image 'im' using OpenCV assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) class Colors: # Ultralytics color palette https://ultralytics.com/ def __init__(self): # hex = matplotlib.colors.TABLEAU_COLORS.values() hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') self.palette = [self.hex2rgb('#' + c) for c in hex] self.n = len(self.palette) def __call__(self, i, bgr=False): c = self.palette[int(i) % self.n] return (c[2], c[1], c[0]) if bgr else c @staticmethod def hex2rgb(h): # rgb order (PIL) return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() for i, (im, pred) in enumerate(zip(results.imgs, results.pred)): str = f'image {i + 1}/{len(results.pred)}: {im.shape[0]}x{im.shape[1]} ' if pred is not None: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {results.names[int(c)]}{'s' * (n > 1)}, " # add to string for *box, conf, cls in pred: # xyxy, confidence, class label = f'{results.names[int(cls)]} {conf:.2f}' plot_one_box(box, im, label=label, color=colors(cls)) im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np plt.imshow(im) ```
github_jupyter
def get_ci(df, col, gb_col='date', percentile='95'):
    """Group `df` by `gb_col` and compute mean, count, std and a normal-approximation
    confidence interval of `col` for each group.

    Parameters
    ----------
    df : pandas.DataFrame
    col : str
        Column whose per-group CI is computed.
    gb_col : str
        Column to group by (default 'date').
    percentile : str
        One of '999', '995', '99', '95', '90', '80' (confidence level).

    Returns
    -------
    pandas.DataFrame with columns [gb_col, 'nanmean', 'count', 'nanstd',
    'ci95_hi', 'ci95_lo'] (the ci column names are fixed regardless of the
    requested percentile, matching downstream plotting code).

    Raises
    ------
    ValueError for an unsupported percentile (previously this fell through
    the if-chain and crashed with an UnboundLocalError on `cival`).
    """
    z_by_percentile = {'999': 3.291, '995': 2.807, '99': 2.576,
                       '95': 1.96, '90': 1.645, '80': 1.282}
    try:
        cival = z_by_percentile[percentile]
    except KeyError:
        raise ValueError(
            "unsupported percentile %r; choose one of %s"
            % (percentile, sorted(z_by_percentile))) from None
    # keep np.nanmean/np.nanstd (not 'mean'/'std') so the output column names
    # ('nanmean', 'nanstd') and the std ddof behavior match the original
    df_stats = df.groupby(gb_col)[col].agg([np.nanmean, 'count', np.nanstd])
    # vectorized replacement of the original per-row Python loop
    half_width = cival * df_stats['nanstd'] / np.sqrt(df_stats['count'])
    df_stats['ci95_hi'] = df_stats['nanmean'] + half_width
    df_stats['ci95_lo'] = df_stats['nanmean'] - half_width
    return df_stats.reset_index()
['steelblue','peru','darkslategrey','palevioletred'] cols2 = ["mediumseagreen","#2f4d5d"] pe1 = [path_effects.Stroke(linewidth=1.5,foreground='w'),path_effects.Normal()] pe2 = [path_effects.Stroke(linewidth=lw*1.5,foreground='w',alpha=0.9),path_effects.Normal()] plot_min_date = '2020-07-18' plot_max_date = '2021-01-08' ihe_df = pd.read_csv('../data/campus_covid_fall2020_full.csv', dtype={'ipeds_id':str, 'fips':str}) ipeds_df = pd.read_csv('../data/campus_covid_ipeds.csv', dtype={'ipeds_id':str, 'fips':str}) status_df = pd.read_csv('../data/campus_covid_status.csv', dtype={'ipeds_id':str, 'fips':str}) census_df = pd.read_csv('../data/campus_covid_census.csv', dtype={'fips':str}) jhu_df = pd.read_csv('../data/campus_covid_jhu_county.csv.gz', dtype={'fips':str}) summary_df = status_df.groupby(['fips','status_merge'] ).agg({'ihe_fulltime_population':'sum'}).reset_index() summary_df = summary_df.merge(jhu_df[['fips','county_population']].drop_duplicates(), how='left', on='fips') summary_df['enrollment_fulltime'] = summary_df['ihe_fulltime_population'] summary_df['total_pop'] = summary_df['county_population'] summary_df['frac_pop_enrollment_fulltime'] = summary_df['enrollment_fulltime']/summary_df['total_pop'] summary_df.head() percent_thresh = 0.03683369933503948 fips_inperson = summary_df.loc[(summary_df['frac_pop_enrollment_fulltime']>percent_thresh)&\ (summary_df['status_merge']=='Primarily In Person')]['fips'].unique() print(len(fips_inperson)) fips_online = summary_df.loc[(summary_df['frac_pop_enrollment_fulltime']>percent_thresh)&\ (summary_df['status_merge']=='Primarily Online')]['fips'].values fips_online = [i for i in fips_online if i not in fips_inperson] print(len(fips_online)) college_county_fips = list(fips_online) + list(fips_inperson) ll_online = jhu_df.loc[jhu_df['fips'].isin(fips_online)].copy() ll_person = jhu_df.loc[jhu_df['fips'].isin(fips_inperson)].copy() plot_dfs = [ll_online, ll_person] labs = ['"college counties" w/ primarily\nonline enrollment 
(n=%i)'%(len(fips_online)), '"college counties" w/ primarily\nin person enrollment (n=%i)'%(len(fips_inperson))] var1 = 'new_cases_per100k' var2 = 'new_deaths_per100k' plot_vars = [var1, var2] fig, ax = plt.subplots(1,2,figsize=(10,4),dpi=200) plt.subplots_adjust(wspace=0.2) for ix,var_i in enumerate(plot_vars): for li, llg in enumerate(plot_dfs): llg_i = get_ci(llg, var_i, gb_col='date', percentile='90') ax[ix].plot([date_dict[i] for i in llg_i['date'].values], llg_i['nanmean'].rolling(window=wid).mean().values, label=labs[li], lw=lw, alpha=0.9, color=cols[li], path_effects=pe2) ax[ix].fill_between([date_dict[i] for i in llg_i['date'].values], llg_i['ci95_lo'].rolling(window=wid).mean().values, llg_i['ci95_hi'].rolling(window=wid).mean().values, alpha=0.2, lw=0, color=cols[li]) ylim0 = ax[ix].get_ylim() ax[ix].fill_between([date_dict[plot_min_date],date_dict['2020-08-01']],0,ylim0[1], color='.7', alpha=0.4, zorder=0, lw=0) ax[ix].fill_between([date_dict['2020-12-23'],date_dict[plot_max_date]],0,ylim0[1], color='.7', alpha=0.4, zorder=0, lw=0) ax[ix].text(date_dict['2020-10-10'],ylim0[1]*0.975,'Fall 2020 semester', va='top',ha='center',color='.3', path_effects=pe1, fontsize=fs*1.4) ax[ix].set_ylim(0,ylim0[1]) ax[0].set_title('Average new cases per 100k (%i-day avg.)'%wid, color='.2', x=0,ha='left',fontsize=fs*1.25) ax[1].set_title('Average new deaths per 100k (%i-day avg.)'%wid, color='.2', x=0,ha='left',fontsize=fs*1.25) l = ax[0].legend(fontsize=fs*0.9, loc=1, ncol=3, framealpha=0, bbox_to_anchor=[1.7, -0.11]) for text in l.get_texts(): text.set_color('.2') letts = ["(a)","(b)"] for ai, a in enumerate(fig.axes): a.text(-0.02, 1.015, letts[ai], ha='right', va='bottom', transform=a.transAxes, fontweight='bold', fontsize='large', color='.2') a.set_xticks([date_dict[i] for i in dates_tt]) a.set_xticklabels(dates_ll) a.tick_params(axis='y', labelcolor='.3', color='.3',labelsize=fs*1.2) a.tick_params(axis='x', labelcolor='.3', color='.3',labelsize=fs*1.1) 
a.grid(linewidth=1.0, color='.6', alpha=0.35) a.set_xlim(date_dict[plot_min_date], date_dict[plot_max_date]) ax[0].text(0.285, -0.275, '"college county": counties where total IHE enrollment '+\ 'is at least %.2f%% of the total county population'%(percent_thresh*100), ha='left',va='top',color='.3',fontsize=fs*0.9,transform=ax[0].transAxes) # plt.savefig('../figs/pngs/matching_casedeaths_x1.png', bbox_inches='tight', dpi=425) # plt.savefig('../figs/pdfs/matching_casedeaths_x1.pdf', bbox_inches='tight', dpi=425) plt.show() ``` _________ ``` ihe_dfg = ihe_df.groupby(['ipeds_id','fips']).agg({'cumulative_cases':'sum', 'cumulative_tests':'sum'}).reset_index() cou_dfg = ihe_dfg.groupby(['fips']).agg({'cumulative_cases':'sum', 'cumulative_tests':'sum'}).reset_index() cou_dfg = cou_dfg.loc[cou_dfg['fips'].isin(college_county_fips)].copy() testmin = 0 cou_dfg['testing'] = 'non testing' cou_dfg.loc[cou_dfg['cumulative_tests']>testmin,'testing'] = 'testing' non_online_df = summary_df.loc[~summary_df['status_merge'].isin(['Hybrid', 'Primarily Online'])].copy() non_online_df = non_online_df.loc[non_online_df['frac_pop_enrollment_fulltime']>percent_thresh].copy() county_fips_for_testing = non_online_df['fips'].unique() cou_dfg_testing = cou_dfg.loc[cou_dfg['fips'].isin(county_fips_for_testing)].copy() fips_testing = cou_dfg_testing.loc[cou_dfg_testing['testing']=='testing']['fips'].unique() fips_nontest = cou_dfg_testing.loc[cou_dfg_testing['testing']!='testing']['fips'].unique() ll_testing = jhu_df.loc[jhu_df['fips'].isin(fips_testing)].copy() ll_nontest = jhu_df.loc[jhu_df['fips'].isin(fips_nontest)].copy() plot_dfs = [ll_testing, ll_nontest] labs = ['"college counties" that do\nreport campus testing (n=%i)'%(len(fips_testing)), '"college counties" that do not\nreport campus testing (n=%i)'%(len(fips_nontest))] fig, ax = plt.subplots(1,2,figsize=(10,4),dpi=200) plt.subplots_adjust(wspace=0.2) for ix,var_i in enumerate(plot_vars): for li, llg in enumerate(plot_dfs): llg_i = 
get_ci(llg, var_i, gb_col='date', percentile='90') ax[ix].plot([date_dict[i] for i in llg_i['date'].values], llg_i['nanmean'].rolling(window=wid).mean().values, label=labs[li], lw=lw, alpha=0.9, color=cols2[li], path_effects=pe2) ax[ix].fill_between([date_dict[i] for i in llg_i['date'].values], llg_i['ci95_lo'].rolling(window=wid).mean().values, llg_i['ci95_hi'].rolling(window=wid).mean().values, alpha=0.2, lw=0, color=cols2[li]) ylim0 = ax[ix].get_ylim() ax[ix].fill_between([date_dict[plot_min_date],date_dict['2020-08-01']],0,ylim0[1], color='.7', alpha=0.4, zorder=0, lw=0) ax[ix].fill_between([date_dict['2020-12-23'],date_dict[plot_max_date]],0,ylim0[1], color='.7', alpha=0.4, zorder=0, lw=0) ax[ix].text(date_dict['2020-10-10'],ylim0[1]*0.975,'Fall 2020 semester', va='top',ha='center',color='.3', path_effects=pe1, fontsize=fs*1.4) ax[ix].set_ylim(0,ylim0[1]) ax[0].set_title('Average new cases per 100k (%i-day avg.)'%wid, color='.2', x=0,ha='left',fontsize=fs*1.25) ax[1].set_title('Average new deaths per 100k (%i-day avg.)'%wid, color='.2', x=0,ha='left',fontsize=fs*1.25) l = ax[0].legend(fontsize=fs*0.9, loc=1, ncol=3, framealpha=0, bbox_to_anchor=[1.7, -0.11]) for text in l.get_texts(): text.set_color('.2') letts = ["(a)","(b)"] for ai, a in enumerate(fig.axes): a.text(-0.02, 1.015, letts[ai], ha='right', va='bottom', transform=a.transAxes, fontweight='bold', fontsize='large', color='.2') a.set_xticks([date_dict[i] for i in dates_tt]) a.set_xticklabels(dates_ll) a.tick_params(axis='y', labelcolor='.3', color='.3',labelsize=fs*1.2) a.tick_params(axis='x', labelcolor='.3', color='.3',labelsize=fs*1.1) a.grid(linewidth=1.0, color='.6', alpha=0.35) a.set_xlim(date_dict[plot_min_date], date_dict[plot_max_date]) ax[0].text(0.285, -0.275, '"college county": counties where total IHE enrollment '+\ 'is at least %.2f%% of the total county population'%(percent_thresh*100), ha='left',va='top',color='.3',fontsize=fs*0.9,transform=ax[0].transAxes) # 
plt.savefig('../figs/pngs/matching_casedeaths_testing.png', bbox_inches='tight', dpi=425) # plt.savefig('../figs/pdfs/matching_casedeaths_testing.pdf', bbox_inches='tight', dpi=425) plt.show() ```
github_jupyter
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
# BUG FIX: sklearn.linear_model.stochastic_gradient is a private module that
# was removed in scikit-learn 0.24; import from the public package instead.
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pickle
import warnings
from UnigramTfFeatureGeneration1 import create_feature_set_and_labels, create_test_feature_set_and_labels
from UnigramTfidfFeaturesetGeneration1 import get_features, get_test_features


def begin_test(train_x, test_x, train_y, test_y):
    """10-fold cross-validate a suite of classifiers (plus a hard-voting
    ensemble) on the pooled train+test data, printing accuracy and F-measure
    for each and pickling each estimator to 'zeroth3_<label>.sav'.
    """
    x = train_x + test_x
    y = train_y + test_y
    # (removed the unused `clf1 = LinearRegression()` -- regression does not
    # belong in this classification suite and it was never referenced)
    clf2 = LogisticRegression()
    clf3 = SGDClassifier()
    clf4 = SVC()
    clf5 = KNeighborsClassifier()
    clf6 = MLPClassifier()
    clf7 = DecisionTreeClassifier()
    clf8 = MultinomialNB()
    eclf = VotingClassifier(
        estimators=[('logr', clf2), ('sgd', clf3), ('svm', clf4),
                    ('kn', clf5), ('nn', clf6), ('dt', clf7)],
        voting='hard')
    for label, clf in zip(
            ['LogisticRegressionClassifier', 'SGDClassifierClassifier', 'SVCClassifier',
             'NearestNeighbourClassifier', 'NeuralNetworkClassifier',
             'DecisionTreeClassifier', 'MultinomialNB', 'EnsembleClassifier'],
            [clf2, clf3, clf4, clf5, clf6, clf7, clf8, eclf]):
        scores = cross_val_score(clf, x, y, cv=10, scoring='accuracy')
        filename = 'zeroth3_' + label + '.sav'
        # NOTE(review): cross_val_score clones the estimator internally, so
        # `clf` is still UNFITTED here -- this pickle saves an untrained model.
        # Call clf.fit(x, y) first if a usable model file is intended.
        with open(filename, 'wb') as fh:  # BUG FIX: close the file handle
            pickle.dump(clf, fh)
        f_measure = cross_val_score(clf, x, y, cv=10, scoring='f1')
        print(label, "Accuracy: ", scores.mean(), "+/- ", scores.std())
        print(label, "F-measure: ", f_measure.mean())


def test_by_tf():
    """Run the classifier suite on unigram term-frequency features."""
    train_x, train_y, test_x, test_y = create_feature_set_and_labels(
        'pos_hindi_final.txt', 'neg_hindi_final.txt',
        'pos_eng_final.txt', 'neg_eng_final.txt',
        'pos_hinglish.txt', 'neg_hinglish.txt')
    # test_x, test_y = create_test_feature_set_and_labels('pos_hinglish.txt', 'neg_hinglish.txt')
    begin_test(train_x, test_x, train_y, test_y)


def test_by_tfidf():
    """Run the classifier suite on unigram tf-idf features."""
    train_x, train_y, test_x, test_y = get_features()
    begin_test(train_x, test_x, train_y, test_y)


# NOTE(review): the four near-identical __main__ guards below are notebook
# cell duplicates; each one re-runs the same tf-idf experiment. Kept as-is to
# preserve behavior -- confirm before consolidating into a single run.
warnings.filterwarnings('ignore')
if __name__ == '__main__':
    print("=" * 10)
    # print("Unigram+Tf Accuracies")
    # test_by_tf()
    print("=" * 10)
    print("Unigram+Tfidf Accuracies")
    test_by_tfidf()

warnings.filterwarnings('ignore')
if __name__ == '__main__':
    print("=" * 10)
    # print("Unigram+Tf Accuracies")
    # test_by_tf()
    print("=" * 10)
    print("Unigram+Tfidf Accuracies")
    test_by_tfidf()

warnings.filterwarnings('ignore')
if __name__ == '__main__':
    print("=" * 10)
    # print("Unigram+Tf Accuracies")
    # test_by_tf()
    print("=" * 10)
    print("Unigram+Tfidf Accuracies")
    test_by_tfidf()

# Tweeter
warnings.filterwarnings('ignore')
if __name__ == '__main__':
    # print("=" * 10)
    # print("Unigram+Tf Accuracies")
    # test_by_tf()
    print("=" * 10)
    print("Unigram+Tfidf Accuracies")
    test_by_tfidf()
github_jupyter