code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Notebook-wide setup: imports, inline plotting, and wide pandas display options.
from __future__ import division, print_function, absolute_import
from past.builtins import basestring  # noqa
import matplotlib
get_ipython().magic(u'matplotlib inline')
from matplotlib import pyplot as plt
from IPython.display import display, HTML
import os
from decimal import Decimal
from traceback import print_exc
import pandas as pd
# you really want to be efficient about RAM, so use iter and itertools
# from itertools import izip
from twip.constant import DATA_PATH
from pugnlp.util import dict2obj
# Widen the notebook cells to the full browser window.
display(HTML("<style>.container { width:100% !important; }</style>"))
# Compact vertical display, but allow very wide tables.
pd.set_option('display.max_rows', 6)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1200)
# +
import pandas as pd
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from pugnlp.segmentation import generate_sentences_from_files
nltk.download('vader_lexicon')
# -
# ### First we need to break up my rambling journal into sentences
# +
# Segment every markdown/text journal file into sentences, one row per sentence.
df = pd.DataFrame.from_records(generate_sentences_from_files('/home/hobs/Dropbox/notes/journal', ['md', 'txt']))
df.to_csv('/home/hobs/Dropbox/notes/journal/sentences.csv.gz')
# BUG FIX: attribute-style assignment (df.size = ..., df.len = ...) does not
# create DataFrame columns -- `DataFrame.size` is a read-only property, and the
# other names become plain instance attributes that are lost on copy/concat.
# Use item access so real columns are created.
df['size'] = df['size'].astype(int)
df['len'] = df['sentence'].str.len().astype(int)
df['words'] = df['sentence'].str.split()
df['num_words'] = pd.Series([len(list(x)) for x in df['words']], index=df['words'].index)
# Index rows by file-modification timestamp so time-series operations work below.
df.index = pd.DatetimeIndex(df['modified'].values)
# -
# BUG FIX: DataFrame.sort() was deprecated and removed (pandas 0.20+); sort the
# rows chronologically by their DatetimeIndex instead.
df.sort_index(inplace=True)
df
# ### Now let's analyze the sentiment of all those sentences
sia = SentimentIntensityAnalyzer()
sia.polarity_scores(df.sentence.iloc[0])
# One row of {neg, neu, pos, compound} VADER scores per sentence, same index as df.
sentiment = pd.DataFrame([sia.polarity_scores(s) for s in df.sentence], index=df.sentence.index)
df = pd.concat((df, sentiment), axis=1)
# BUG FIX: pd.datetools.parse_time_string was a private helper (since removed)
# that parsed a single string, not a whole column; pd.to_datetime vectorizes.
df['created'] = pd.to_datetime(df['created'])
df['modified'] = pd.to_datetime(df['modified'])
df.index = pd.DatetimeIndex(df['modified'])
df.to_csv('/home/hobs/Dropbox/notes/journal/journal_sentiment_dated.csv.gz')
# Negate the negative score so pos/neg plot on opposite sides of zero.
df['neg'] = -df['neg']
df[['name', 'sentence', 'pos', 'neu', 'neg']]
# ### Any Trends?
df[['neg', 'pos']].plot(style='.')
# 60-sample boxcar (moving-average) smoothing to expose longer-term trends.
df[['neg', 'pos']].rolling(window=60, win_type='boxcar').mean().plot(style='.')
| docs/notebooks/22 sentiment -- Quantified Self.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python [conda env:PythonData] *
# language: python
# name: conda-env-PythonData-py
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
import requests
import pandas as pd
import json
from citipy import citipy
import random
# Import API key
from api_keys import weather_api_key
# -
# ## Generate Cities List
# +
cities = []
# Sample 1400 random coordinate pairs and record the nearest named city to each.
for _ in range(1400):
    rand_lat = random.randint(-90, 90)
    rand_lng = random.randint(-180, 180)
    nearest = citipy.nearest_city(rand_lat, rand_lng)
    cities.append(nearest.city_name)
# De-duplicate, keeping a plain list for indexed access later on.
cities = list(set(cities))
len(cities), cities
# +
# original output for reference
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# configure base url and units for the OpenWeatherMap current-weather endpoint
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "imperial"
# build partial query URL; the city name is appended per request
query_url = f"{url}appid={weather_api_key}&units={units}&q="
# parallel lists, one entry per successfully retrieved city
owm_cities = []
lat = []
lng = []
temp = []
humid = []
cloud = []
wind_spd = []
country = []
date = []
# counters for numbering records: a = city index, r = record within set, s = set number
a = 0
r = 0
s = 1
# header for records
print("Beginning Data Retrieval")
print("-----------------------------")
# function to capture exceptions where no data available
# function to capture exceptions where no data is available in the API response
def cities_data(current_city, ci, la, ln, tm, hm, cl, ws, co, da, resp, rx, sx, ax):
    """Append one city's weather fields from a JSON response to the parallel lists.

    Mutates the list arguments in place.  A response missing the expected keys
    (e.g. an OpenWeatherMap "city not found" payload) is logged and skipped.
    Returns the (mutated) inputs for convenience; callers may ignore the result.
    """
    try:
        print(f"Processing Record {rx+1} of Set {sx} | {current_city[ax]}")
        ci.append(resp["name"])
        la.append(resp["coord"]["lat"])
        ln.append(resp["coord"]["lon"])
        tm.append(resp['main']['temp_max'])
        hm.append(resp['main']['humidity'])
        cl.append(resp["clouds"]["all"])
        ws.append(resp["wind"]["speed"])
        co.append(resp["sys"]["country"])
        da.append(resp["dt"])
    except KeyError:
        # Missing keys mean the API did not find the city; skip this record.
        print("City not found. Skipping...")
    # BUG FIX: the wind-speed list `ws` was missing from the original return tuple.
    return current_city, ci, la, ln, tm, hm, cl, ws, co, da, resp, rx, sx, ax
# loop through the API, one GET request per candidate city
for cit in cities:
    response = requests.get(query_url + cit).json()
    # NOTE(review): `a <= len(cities)` is always true inside this loop, so the
    # final else branch is unreachable -- confirm the intended guard.
    if a <= len(cities):
        if r < 49:
            cities_data(cities,owm_cities,lat,lng,temp,humid,cloud,wind_spd,country,date,response,r,s,a)
            r += 1
            a += 1
        else:
            # the 50th record closes out the current set: reset the record
            # counter and advance the set number
            cities_data(cities,owm_cities,lat,lng,temp,humid,cloud,wind_spd,country,date,response,r,s,a)
            r = 0
            a += 1
            s += 1
    else:
        print("Uh-oh, something went wrong!")
# footer for records
print("-----------------------------")
print("Data Retrieval Complete")
print("-----------------------------")
# -
# sanity check: all parallel result lists should be the same length
print(len(cities),a,len(owm_cities),len(lat),len(lng),len(temp),len(humid),len(cloud),len(wind_spd),len(country),len(date))
#print(json.dumps(response, indent=4, sort_keys=True))
# +
# original output for reference
# -
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# +
# create new DataFrame with the data extracted from the JSON of openweathermap.org,
# one column per parallel list collected above
df = pd.DataFrame ({
    "City": owm_cities,
    "Lat": lat,
    "Lng": lng,
    "Max Temp": temp,
    "Humidity": humid,
    "Cloudiness": cloud,
    "Wind Speed": wind_spd,
    "Country": country,
    "Date": date
})
# drop any duplicate cities from the DataFrame
clean_df = df.drop_duplicates()
# non-null counts per column, then a preview of the first rows
clean_df.count()
clean_df.head()
# +
# original output for reference
# -
# get summary statistics using the .describe() method
clean_df.describe()
# +
# original output for reference
# -
# export DataFrame to CSV
# NOTE(review): to_csv returns None when given a path, so `output_csv` is None.
output_csv = clean_df.to_csv("../output_data/cities.csv")
output_csv
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
# +
# original output for reference
# -
# Get the indices of cities that have humidity over 100% (none expected).
indices = clean_df.index[clean_df["Humidity"]>100]
indices
# +
# original output for reference
# -
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# +
# not required: no cities in dataframe clean_df have > 100% humidity
# +
#test = clean_df.loc[clean_df["Lat"]>=0,:]
#test["Lat"]
# -
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# +
# shared helper used by every latitude scatter chart below
def scatter_plot(x_axis, y_axis, x_lab, y_lab, x_lim_0, x_lim_1, y_lim_0, y_lim_1, chart_title):
    """Draw a labelled, axis-bounded scatter chart on the current figure.

    Returns the arguments unchanged so callers can reuse them if desired.
    """
    marker_style = dict(marker="o", facecolors="blue", edgecolors="black", alpha=0.5)
    plt.scatter(x_axis, y_axis, **marker_style)
    plt.xlabel(x_lab)
    plt.ylabel(y_lab)
    plt.xlim(x_lim_0, x_lim_1)
    plt.ylim(y_lim_0, y_lim_1)
    plt.title(chart_title)
    return (x_axis, y_axis, x_lab, y_lab,
            x_lim_0, x_lim_1, y_lim_0, y_lim_1, chart_title)
# DataFrame series pulled out once and reused by all the plotting cells below
lat_plot = clean_df.Lat
temp_plot = clean_df["Max Temp"]
humid_plot = clean_df.Humidity
cloud_plot = clean_df.Cloudiness
wind_spd_plot = clean_df["Wind Speed"]
# +
# Four scatter charts: city latitude vs temperature / humidity / cloudiness /
# wind speed, each saved to ../output_data and followed by a short analysis.
# plot a scatter plot comparing City Latitude to Maximum Temperature
scatter_plot(lat_plot, temp_plot, "Latitude", "Max Temperature (F)", -61, 85, -15, 110, "City Latitude vs. Max Temperature (11/01/21)")
plt.grid()
# export .png
plt.savefig("../output_data/Fig1.png")
plt.show()
# print analysis
print(f"The scatter plot shows a generally higher temperature range at lower latitudes (between -20 and 20) with significantly lower temperatures further from the equator (e.g. 40 - 80).")
# +
# original output for reference
# -
# ## Latitude vs. Humidity Plot
# +
# plot a scatter plot comparing City Latitude to Humidity
scatter_plot(lat_plot, humid_plot, "Latitude", "Humidity (%)", -61, 85, -15, 110, "City Latitude vs. Humidity (11/01/21)")
plt.grid()
# export .png
plt.savefig("../output_data/Fig2.png")
plt.show()
# print analysis
print(f"There appears to be no discernable correlation between a city's latitude and humidity.")
# +
# original output for reference
# -
# ## Latitude vs. Cloudiness Plot
# +
# Plot a scatter plot comparing City Latitude to Cloudiness
scatter_plot(lat_plot, cloud_plot, "Latitude", "Cloudiness (%)", -61, 85, -15, 110, "City Latitude vs. Cloudiness (11/01/21)")
plt.grid()
# export .png
plt.savefig("../output_data/Fig3.png")
plt.show()
# print analysis
print(f"There appears to be no discernable correlation between a city's latitude and cloudiness. The distribution appears fairly even throughout all latitudes.")
# +
# original output for reference
# -
# ## Latitude vs. Wind Speed Plot
# +
# plot a scatter plot comparing City Latitude to Wind Speed
scatter_plot(lat_plot, wind_spd_plot, "Latitude", "Wind Speed (mph)", -61, 85, -2, 48, "City Latitude vs. Wind Speed (11/01/21)")
plt.grid()
# export .png
plt.savefig("../output_data/Fig4.png")
plt.show()
# print analysis
print(f"There appears to be no discernable correlation between a city's latitude and wind speed. In general, most cities appear to be within the 0 - 20 mph range.")
# +
# original output for reference
# -
# ## Linear Regression
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# Northern Hemisphere scatter + linear regression helper
def linear_reg_north(x_series, y_series, x_label_lr, y_label_lr, x_lim_0_lr, x_lim_1_lr, y_lim_0_lr, y_lim_1_lr,chart_title_lr,lex,ley):
    """Scatter + least-squares fit for Northern Hemisphere rows of clean_df.

    Rows with clean_df[x_series] >= 0 are plotted via scatter_plot, a red
    dashed regression line is overlaid, and its equation is annotated at
    (lex, ley).  Returns (slope, intercept, r, p, std_err, fitted_values).
    """
    northern = clean_df.loc[clean_df[x_series] >= 0, :]
    x_lr = northern[x_series]
    y_lr = northern[y_series]
    # least-squares fit via scipy
    slope, intercept, r_value, p_value, std_err = stats.linregress(x_lr, y_lr)
    fitted = slope * x_lr + intercept
    # base scatter chart
    scatter_plot(x_lr, y_lr, x_label_lr, y_label_lr, x_lim_0_lr, x_lim_1_lr, y_lim_0_lr, y_lim_1_lr, chart_title_lr)
    # overlay the regression line and annotate its equation
    plt.plot(x_lr, fitted, "--", color="red")
    line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
    plt.annotate(line_eq, (lex, ley), fontsize=15, color="red")
    return slope, intercept, r_value, p_value, std_err, fitted
# +
# plot a scatter plot comparing Northern Hemisphere Latitude to Max Temp
temp_ln_n = linear_reg_north("Lat", "Max Temp", "Latitude", "Max Temp", 0, 80, -10, 120, "Northern Hemisphere - Max Temp (F) vs Latitude Linear Regression",6,6)
# index 2 of the returned tuple is the correlation coefficient r
print(f"The r-value is: {temp_ln_n[2]}")
# export .png
plt.savefig("../output_data/Fig5.png")
plt.show()
# print analysis
print(f"There appears to be a strong correlation with an increase in latitude and decrease in max. temperatures, evidenced by the high r value.")
# +
# original output for reference
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# Southern Hemisphere scatter + linear regression helper
def linear_reg_south(x_series, y_series, x_label_lr, y_label_lr, x_lim_0_lr, x_lim_1_lr, y_lim_0_lr, y_lim_1_lr,chart_title_lr,lex,ley):
    """Scatter + least-squares fit for Southern Hemisphere rows of clean_df.

    Rows with clean_df[x_series] < 0 are plotted via scatter_plot, a red
    dashed regression line is overlaid, and its equation is annotated at
    (lex, ley).  Returns (slope, intercept, r, p, std_err, fitted_values).
    """
    southern = clean_df.loc[clean_df[x_series] < 0, :]
    x_lr = southern[x_series]
    y_lr = southern[y_series]
    # least-squares fit via scipy
    slope, intercept, r_value, p_value, std_err = stats.linregress(x_lr, y_lr)
    fitted = slope * x_lr + intercept
    # base scatter chart
    scatter_plot(x_lr, y_lr, x_label_lr, y_label_lr, x_lim_0_lr, x_lim_1_lr, y_lim_0_lr, y_lim_1_lr, chart_title_lr)
    # overlay the regression line and annotate its equation
    plt.plot(x_lr, fitted, "--", color="red")
    line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
    plt.annotate(line_eq, (lex, ley), fontsize=15, color="red")
    return slope, intercept, r_value, p_value, std_err, fitted
# +
# Remaining regression cells: one figure per hemisphere/variable pair
# (Figs 6-12), each saved to ../output_data and followed by a short analysis.
# plot a scatter plot comparing Southern Hemisphere Latitude to Max Temp
temp_ln_s = linear_reg_south("Lat", "Max Temp", "Latitude", "Max Temp", -60, 5, -10, 120, "Southern Hemisphere - Max Temp (F) vs Latitude Linear Regression", -25, 3)
print(f"The r-value is: {temp_ln_s[2]}")
# export .png
plt.savefig("../output_data/Fig6.png")
plt.show()
# print analysis
print(f"There appears to be somewhat of a correlation with an decrease in latitude (i.e. further from the equator) and decrease in max. temperatures, evidenced by the medium r value.")
# +
# original output for reference
# -
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# plot a scatter plot comparing Northern Hemisphere Latitude to Humidity
humid_ln_n = linear_reg_north("Lat", "Humidity", "Latitude", "Humidity", 0, 80, 5, 110, "Northern Hemisphere - Humidity (%) vs Latitude Linear Regression",30,10)
print(f"The r-value is: {humid_ln_n[2]}")
# export .png
plt.savefig("../output_data/Fig7.png")
plt.show()
# print analysis
print(f"There appears to be a weak correlation with an increase in latitude away from the equator and increase in humidity, evidenced by the somewhat low r value and a lot of datapoints far apart from the linear regression line.")
# +
# original output for reference
# -
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# plot a scatter plot comparing Southern Hemisphere Latitude to Humidity
humid_ln_s = linear_reg_south("Lat", "Humidity", "Latitude", "Humidity", -60, 5, 5, 105, "Southern Hemisphere - Humidity (%) vs Latitude Linear Regression", -25,10)
print(f"The r-value is: {humid_ln_s[2]}")
# export .png
plt.savefig("../output_data/Fig8.png")
plt.show()
# print analysis
print(f"There appears to be a weak correlation with an increase in latitude towards the equator and an increase in humidity, evidenced by the low r value and a lot of datapoints far apart from the linear regression line.")
# +
# original output for reference
# -
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# plot a scatter plot comparing Northern Hemisphere Latitude to Cloudiness
cloud_ln_n = linear_reg_north("Lat", "Cloudiness", "Latitude", "Cloudiness", 0, 80, -5, 105, "Northern Hemisphere - Cloudiness (%) vs Latitude Linear Regression",15,8)
print(f"The r-value is: {cloud_ln_n[2]}")
# export .png
plt.savefig("../output_data/Fig9.png")
plt.show()
# print analysis
print(f"There appears to be a weak correlation with an increase in latitude away from the equator and increase in cloudiness, evidenced by the low r value and a lot of datapoints far apart from the linear regression line.")
# +
# original output for reference
# -
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# plot a scatter plot comparing Southern Hemisphere Latitude to Cloudiness
cloud_ln_s = linear_reg_south("Lat", "Cloudiness", "Latitude", "Cloudiness", -60, 5, -5, 105, "Southern Hemisphere - Cloudiness (%) vs Latitude Linear Regression",-50,6)
print(f"The r-value is: {cloud_ln_s[2]}")
# export .png
plt.savefig("../output_data/Fig10.png")
plt.show()
# print analysis
print(f"There appears to be a weak correlation with an increase in latitude towards the equator and an increase in cloudiness, evidenced by the low r value and a lot of datapoints far apart from the linear regression line.")
# +
# original output for reference
# -
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# plot a scatter plot comparing Northern Hemisphere Latitude to Wind Speed
wind_ln_n = linear_reg_north("Lat", "Wind Speed", "Latitude", "Wind Speed", 0, 80, -5, 105, "Northern Hemisphere - Wind Speed (mph) vs Latitude Linear Regression", 6,30)
print(f"The r-value is: {wind_ln_n[2]}")
# export .png
plt.savefig("../output_data/Fig11.png")
plt.show()
# print analysis
print(f"There appears to be a weak correlation with an increase in latitude away from the equator and any change in wind speed, evidenced by the low r value.")
# +
# original output for reference
# -
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# plot a scatter plot comparing Southern Hemisphere Latitude to Wind Speed
wind_ln_s = linear_reg_south("Lat", "Wind Speed", "Latitude", "Wind Speed", -60, 5, -5, 105, "Southern Hemisphere - Wind Speed (mph) vs Latitude Linear Regression",-25,30)
print(f"The r-value is: {wind_ln_s[2]}")
# export .png
plt.savefig("../output_data/Fig12.png")
plt.show()
# print analysis
print(f"There appears to be a weak correlation with an increase in latitude towards the equator and any change in wind speed, evidenced by the low r value.")
# +
# original output for reference
# -
| WeatherPy/WeatherPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial on collocaing a dataset with lagged environmental data
# +
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import pandas as pd
import warnings
# filter some warning messages
warnings.filterwarnings("ignore")
from geopy.distance import geodesic
####################you will need to change some paths here!#####################
# input: zooplankton observations CSV to be collocated with satellite SST
filename_bird='f:/data/project_data/NASA_biophysical/collocated_data/zoo_selgroups_HadSST_relabundance_5aug2019_plumchrusV_4regions_final.csv'
# outputs: CSV and netCDF versions of the collocated dataset
filename_bird_out='f:/data/project_data/NASA_biophysical/collocated_data/zoo_selgroups_HadSST_relabundance_5aug2019_plumchrusV_4regions_final_satsst.csv'
filename_bird_out_netcdf='f:/data/project_data/NASA_biophysical/collocated_data/zoo_selgroups_HadSST_relabundance_5aug2019_plumchrusV_4regions_final_satsst.nc'
#################################################################################
# -
# ## Reading CSV datasets
#read in csv file in to panda dataframe & into xarray
df_bird = pd.read_csv(filename_bird)
ds_bird = df_bird.to_xarray()
#calculate time
#error in time, later data includes date and hour
ilen_bird = len(ds_bird.lat)
# pre-allocate a datetime64 variable aligned on the observation index
ds_bird['time64'] = xr.DataArray(np.empty(ilen_bird, dtype='datetime64[ns]'), coords={'index': ds_bird.index}, dims=('index'))
for i in range(len(df_bird)):
    # NOTE(review): row index 1 is used on every iteration, so every record
    # inherits the hour-of-day of row 1 -- confirm whether df_bird.time[i]
    # was intended here.
    tem = df_bird.time[1]
    ii=tem.find(' ')
    shr = str(tem[ii+1:])
    # build an ISO timestamp from the year/month/day columns plus that hour
    tstr = str(df_bird.year[i])+'-'+str(df_bird.month[i]).zfill(2)+'-'+str(df_bird.day[i]).zfill(2)+'T'+shr
    ds_bird['time64'][i]=np.datetime64(tstr)
ds_bird = ds_bird.rename({'long':'lon'})
ds_bird
# just check lat/lon extents & scatter station positions to see they look okay
minlat,maxlat=ds_bird.lat.min(),ds_bird.lat.max()
minlon,maxlon=ds_bird.lon.min(),ds_bird.lon.max()
plt.scatter(ds_bird.lon,ds_bird.lat)
print(minlat,maxlat,minlon,maxlon)
#open cmc sst; drop variables not needed for the collocation
ds = xr.open_zarr('F:/data/sat_data/sst/cmc/zarr').drop({'analysis_error','mask','sea_ice_fraction'})
ds
#average 0.6 deg in each direction (3-cell rolling window) to create mean
ds = ds.rolling(lat=3,center=True,keep_attrs=True).mean(keep_attrs=True)
ds = ds.rolling(lon=3,center=True,keep_attrs=True).mean(keep_attrs=True)
ds
# trailing (center=False) 30-day and 15-day running means provide lagged SST
ds_mon = ds.rolling(time=30, center=False,keep_attrs=True).mean(keep_attrs=True)
ds_15 = ds.rolling(time=15, center=False,keep_attrs=True).mean(keep_attrs=True)
ds['analysed_sst_1mon']=ds_mon['analysed_sst']
ds['analysed_sst_15dy']=ds_15['analysed_sst']
ds
# # Collocate all data with bird data
len(ds_bird.lat)
# +
ds_data = ds
# add one empty output variable per SST field, aligned on the observation index
for var in ds_data:
    var_tem=var
    ds_bird[var_tem]=xr.DataArray(np.empty(ilen_bird, dtype=str(ds_data[var].dtype)), coords={'index': ds_bird.index}, dims=('index'))
    ds_bird[var_tem].attrs=ds_data[var].attrs
    print('var',var_tem)
# interpolate each SST field to every observation's time/lat/lon
for i in range(len(ds_bird.lat)):
#    for i in range(len(ds_bird.lat)):
#    if ds_bird.time[i]<ds_data.time.min():
#        continue
#    if ds_bird.time[i]>ds_data.time.max():
#        continue
    # subset +/- 24 h and +/- 0.5 deg around the observation, then interpolate
    t1,t2 = ds_bird.time64[i]-np.timedelta64(24,'h'), ds_bird.time64[i]+np.timedelta64(24,'h')
    lat1,lat2=ds_bird.lat[i]-.5,ds_bird.lat[i]+.5
    lon1,lon2=ds_bird.lon[i]-.5,ds_bird.lon[i]+.5
    tem = ds_data.sel(time=slice(t1,t2),lat=slice(lat1,lat2),lon=slice(lon1,lon2)).load()
    tem = tem.interp(time=ds_bird.time64[i],lat=ds_bird.lat[i],lon=ds_bird.lon[i])
    #tem = tem.load()
    for var in ds_data:
        var_tem=var
        ds_bird[var_tem][i]=tem[var].data
    # progress report every 100 observations
    if int(i/100)*100==i:
        print(i,len(ds_bird.lat))
#output data
df_bird = ds_bird.to_dataframe()
df_bird.to_csv(filename_bird_out)
#ds_bird.to_netcdf(filename_bird_out_netcdf)
# -
# test rolling on a tiny monthly series to confirm the window is backward-looking
# BUG FIX: the original cell referenced `var2` (undefined, NameError) and printed
# `da.data` before `da` was created; define the array first, then print.
da = xr.DataArray(
    np.linspace(0, 11, num=12),
    coords=[pd.date_range("15/12/1999", periods=12, freq=pd.DateOffset(months=1))],
    dims="time",
)
print(da.data)
dar = da.rolling(time=3, center=False).mean()  # mean of each step and the 2 before it
print(dar.data)
| zoo_data_sst_lag.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--NOTEBOOK_HEADER-->
# *This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);
# content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).*
# <!--NAVIGATION-->
# < [How to Get Started](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/01.00-How-to-Get-Started.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Jupyter Notebooks, Python, and Google Colaboratory](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/01.02-Notebooks-Python-Colab.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/01.01-How-to-Get-Local-PyRosetta.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
# # How to Get PyRosetta on Your Personal Computer
# ## Download the main necessary packages to work locally on your machine
# ### Python 3.6 (and preferably IPython for tab-completion)
# Select the Python 3.6.7 version to download from https://www.python.org/downloads/. For IPython, you should be able to type `pip install ipython` in your terminal.
# ### PyMOL
#
# Here is the link to the free educational version: https://pymol.org/edu/?q=educational/.
# ### PyRosetta-3.6.Release
# 1) Go to https://els.comotion.uw.edu/licenses/88 and fill out the form for a free academic license for PyRosetta
#
# 2) Download PyRosetta and use your terminal to navigate into the folder it creates
#
# 3) I recommend moving that folder (ex. PyRosetta4.Release.python36.mac.release) out of your Downloads directory and into your home directory (remember ~/)
#
# 4) cd into the setup directory and type `python3.6 setup.py install` (you may need to add sudo before the python part if your computer says you don't have permission)
#
#
# <!--NAVIGATION-->
# < [How to Get Started](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/01.00-How-to-Get-Started.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Jupyter Notebooks, Python, and Google Colaboratory](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/01.02-Notebooks-Python-Colab.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/01.01-How-to-Get-Local-PyRosetta.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
| notebooks/01.01-How-to-Get-Local-PyRosetta.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PATE GAN based on DCGAN
# We will attempt to employ sophisticated techniques of PATE GANs
import os
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pylab
import numpy as np
# %load_ext autoreload
# %autoreload 2
# number of teacher discriminators in the PATE ensemble
n_teachers = 10
# +
import os
# directory layout for saved models (GDRIVE_PATH left empty for local runs)
GDRIVE_PATH = ''
THIS_EXERCISE_PATH = os.path.join(GDRIVE_PATH, "GAN_exercises")
MODELS_HOME = os.path.join(THIS_EXERCISE_PATH, "mnist guns")
# -
# checkpoint file paths for the generator, the student, and each teacher
GENERATOR_FILE = os.path.join(MODELS_HOME, 'generator_pate.pt')
DISCRIMINATOR_FILE = os.path.join(MODELS_HOME, 'student_pate.pt')
TEACHERS_FILE = [os.path.join(MODELS_HOME, 'teacher_pate_' + str(i) + '.pt') for i in range(n_teachers)]
#TODO teacher's files - ?
os.makedirs(THIS_EXERCISE_PATH, exist_ok=True)
os.makedirs(MODELS_HOME, exist_ok=True)
# +
class Reshape(torch.nn.Module):
    """Reshape every non-batch dimension to a fixed target shape.

    The 0th (batch) dimension is passed through untouched.
    """
    def __init__(self, *shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, *self.shape)
class Flatten(nn.Module):
    """Collapse all non-batch dimensions into a single one."""
    def forward(self, input):
        batch = input.shape[0]
        return input.view(batch, -1)
# -
# # Load data
LEAK_SHARE = 0.2 #how much elements have been delivered to an adversary
import pandas as pd
# pixel data and labels hosted on data.world
df = pd.read_csv('https://query.data.world/s/nap7jvxtupud25z5ljvtbzzjjsqqay')
df.head()
target = pd.read_csv('https://query.data.world/s/sn3dximsq5sw3a6wtqoc3okulevugz')
target.head()
from sklearn.model_selection import train_test_split
train, test, tar_train, tar_test = train_test_split(df, target, test_size=0.2, random_state=12345)
# +
train = np.array(train, dtype='float')
test = np.array(test, dtype='float')
tar_train = np.array(tar_train, dtype='float')
tar_test = np.array(tar_test, dtype='float')
# reshape flat pixel rows to (N, 1, 28, 28) and scale into [0, 1]
train= train.reshape((-1, 1, 28, 28)) / 255.
test= test.reshape((-1, 1, 28, 28)) / 255.
# -
# shuffle, then split off the "leaked" subset from the remainder
n_leak = round(LEAK_SHARE * train.shape[0])
train_leak = train[np.random.permutation(train.shape[0])]
train_check = train_leak[n_leak:]
train_leak = train_leak[0:n_leak]
# # The net itself
# Generator (the only one) with a discriminator as the student.
# +
CODE_SIZE = 100       # length of the generator's latent noise vector
DROPOUT_RATE = 0.2
nc=1                  # image channels
nz=100                # latent vector length fed to the conv generator
ngf=64                # generator feature-map width
ndf=64                # student feature-map width
# Resume from checkpoints when they exist; otherwise build fresh DCGAN nets.
try:
    generator = torch.load(GENERATOR_FILE)
    # BUG FIX: the checkpoint was loaded into `discriminator`, a name the rest
    # of the notebook never uses -- training and saving refer to `student`, so
    # resuming from a checkpoint raised NameError.  Load into `student`.
    student = torch.load(DISCRIMINATOR_FILE)
except FileNotFoundError:
    # BUG FIX: original message had a garbled double negative
    # ("have not been not found").
    print('Checkpoint files not found: making new nets\n')
    generator = torch.nn.Sequential(
        # input is Z, going into a convolution
        nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
        nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        # state size. (ngf*8) x 4 x 4
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
        # state size. (ngf*4) x 8 x 8
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        # state size. (ngf*2) x 16 x 16
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        nn.ConvTranspose2d( ngf,nc, kernel_size=1, stride=1, padding=2, bias=False),
        nn.Tanh()
    ).cuda()
    student = nn.Sequential(
        # input is (nc) x 64 x 64
        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf) x 32 x 32
        nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 2),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*2) x 16 x 16
        nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 4),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*4) x 8 x 8
        nn.Conv2d(ndf * 4, 1, 4, 2, 1, bias=False),
        #nn.Sigmoid()
    ).cuda()
# -
# Several teachers: one DCGAN-style discriminator per disjoint training shard.
teacher = []
for i in range(n_teachers):
    teacher.append(nn.Sequential(
        # input is (nc) x 64 x 64
        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf) x 32 x 32
        nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 2),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*2) x 16 x 16
        nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 4),
        nn.LeakyReLU(0.2, inplace=True),
        # state size. (ndf*4) x 8 x 8
        nn.Conv2d(ndf * 4, 1, 4, 2, 1, bias=False),
        #nn.Sigmoid()
    ).cuda())
# pred is the teachers' prediction: 2d array, pred[i][0] = likelihood of fake,
# pred[i][1] = 1 - pred[i][0] = likelihood of real
def PATE(lam, pred):
    """Noisy-max aggregation of the teacher votes (Laplace-style noise).

    Returns a boolean array: True where the noisy "real" vote count exceeds
    the noisy "fake" vote count for that sample.
    """
    fake_votes = pred.sum(axis=0)
    real_votes = n_teachers - fake_votes
    scale = 1 / lam
    draw = lambda: np.random.exponential(scale, (pred.shape[1]))
    # difference of two exponentials on each side gives symmetric noise
    noisy_fake = fake_votes + draw() - draw()
    noisy_real = real_votes + draw() - draw()
    return noisy_fake < noisy_real
# privacy budget; lam scaling per the paper's theorem
EPSILON = 0.01
lam = train.shape[0] / EPSILON #see page 4, there is a theorem
def sample_fake(batch_size):
    """Generate a batch of fake images from random latent noise (on the GPU)."""
    noise = torch.randn(batch_size, CODE_SIZE, 1, 1, device="cuda")
    return generator(noise)
# ### Train set division
train_teach = train.reshape((10, -1, 1, 28, 28)) #train set division: one shard per teacher
train.shape
train_teach.shape
def sample_images_for_teacher(batch_size, num):
    """Sample a random batch of real images from teacher `num`'s data shard."""
    ids = np.random.choice(len(train_teach[num]), size=batch_size)
    return torch.tensor(train_teach[num, ids], device="cuda").float()
def sample_images(batch_size, train=train):
    """Sample a random batch of real images.

    The `train` array is captured as a default argument at definition time.
    """
    ids = np.random.choice(len(train), size=batch_size)
    return torch.tensor(train[ids], device="cuda").float()
# ### Losses (?)
# +
from torch.nn.functional import logsigmoid
from torch.nn.functional import sigmoid
def generator_loss(fake):
    """Generator objective: logsigmoid(-student(fake))."""
    return logsigmoid(-student(
        fake
    )).mean() #log(1 - sigmoid(student_prediction))
def student_loss(fake, pred):
    """Student objective against the noisy PATE-aggregated teacher labels."""
    # r[i] = 1 where the noisy teacher vote says "real" for sample i
    r = torch.tensor(PATE(lam, pred), device = 'cuda').float()
    return (r * logsigmoid(student(fake))).mean() + \
        ((1-r)*logsigmoid(-student(fake))).mean()
def teacher_loss(real, fake, num):
    """GAN discriminator loss for teacher `num`: reals up, fakes down."""
    return -logsigmoid(teacher[num](
        real
    )).mean() - \
        logsigmoid(-teacher[num](
        fake
    )).mean()
# +
# RMSprop optimizers for generator and student, with slow exponential LR decay
optimizer_generator = \
    torch.optim.RMSprop(generator.parameters(), lr=1e-3)
optimizer_student = \
    torch.optim.RMSprop(student.parameters(), lr=1e-3)
student_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_student, step_size=10, gamma=0.999)
gen_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_generator, step_size=10, gamma=0.999)
#TODO teacher optimisers - ???
# one optimizer + scheduler per teacher
optimizer_teacher = []
teacher_scheduler = []
for i in range(n_teachers):
    optimizer_teacher.append(torch.optim.RMSprop(teacher[i].parameters(), lr=0.001))
    teacher_scheduler.append(torch.optim.lr_scheduler.StepLR(optimizer_teacher[i], step_size=10, gamma=0.999))
# +
def gen_train():
    """One generator update step; returns the generator loss tensor."""
    fake = sample_fake(BATCH_SIZE)
    gen_loss = generator_loss(fake)
    optimizer_generator.zero_grad()
    gen_loss.backward()
    optimizer_generator.step()
    return gen_loss
def teacher_train(num):
    """One update step for teacher `num` on its own data shard."""
    real = sample_images_for_teacher(BATCH_SIZE, num)
    fake = sample_fake(BATCH_SIZE)
    tea_loss = teacher_loss(real, fake, num)
    optimizer_teacher[num].zero_grad()
    tea_loss.backward()
    optimizer_teacher[num].step()
    return tea_loss
def student_train():
    """One student update step against the aggregated teacher predictions."""
    fake = sample_fake(BATCH_SIZE)
    # teacher probabilities are detached to numpy so PATE noise stays outside autograd
    pred = np.array([(sigmoid(teacher[i](fake))).cpu().detach().numpy() for i in range(n_teachers)])
    stud_loss = student_loss(fake, pred)
    optimizer_student.zero_grad()
    stud_loss.backward()
    optimizer_student.step()
    return stud_loss
# -
VALIDATION_INTERVAL = 150   # iterations between preview plots
SAVE_INTERVAL = 500         # iterations between checkpoint saves
NT = 5                      # teacher update rounds per iteration
NS = 5                      # student updates per iteration
BATCH_SIZE=64
# +
import matplotlib
def plot_images(images: np.ndarray,
nrows: int=5, ncols: int=5,
shuffle: bool=True,
title: str="",
figure: matplotlib.figure.Figure=None) -> matplotlib.figure.Figure:
"""
Plots a subset of images.
Args:
images[n_images, n_channels, width, height]: a dataset with images to plot
nrows: number of images in a plotted row
ncols: numer of images in a plotted colunm
shuffle: if True draw a random subset of images, if False -- the first ones
figure: if not None, it's used for plotting, if None, a new one is created
Returns:
a figure containing the plotted images
"""
if shuffle:
images_to_plot = images[np.random.permutation(len(images))[:nrows*ncols]]
else:
images_to_plot = images[:nrows * ncols]
h, w = images_to_plot.shape[2:]
if figure is None:
figure = plt.figure(figsize=(8,8))
axes = figure.subplots(nrows=nrows, ncols=ncols)
for row_idx, ax_row in enumerate(axes):
for col_idx, ax in enumerate(ax_row):
ax.imshow(images_to_plot[row_idx + ncols*col_idx, 0],
interpolation="none")
ax.set_axis_off()
figure.suptitle(title, fontsize=18)
return figure
#plot_images(train, title="Some digits");
# -
from IPython.display import clear_output
# Main PATE-GAN training loop: teachers, then student, then generator.
for i in range(2000):
    # Set our models to training mode:
    generator.train()
    student.train()
    # NOTE(review): schedulers are stepped on every iteration and before the
    # optimizer steps inside the *_train() helpers — confirm this ordering.
    gen_scheduler.step()
    student_scheduler.step()
    for num in range(n_teachers):
        teacher[num].train()
        teacher_scheduler[num].step()
    # Several discriminator updates per step:
    for j in range(NT):
        for k in range(n_teachers):
            # Sampling reals and fakes
            teacher_train(k)
    for j in range(NS):
        student_train()
    gen_train()
    # Periodically checkpoint all models.
    if i % SAVE_INTERVAL == 0:
        torch.save(generator, GENERATOR_FILE)
        torch.save(student, DISCRIMINATOR_FILE)
        for num in range(n_teachers):
            torch.save(teacher[num], TEACHERS_FILE[num])
    # Periodically render a grid of generated samples.
    if i % VALIDATION_INTERVAL == 0:
        clear_output(wait=True)
        generator.eval()
        imgs = sample_fake(25).cpu().detach().numpy()
        plot_images(imgs.clip(0, 1), title='Iteration '+str(i));
        plt.show();
# Post-training inspection of teacher predictions on one fake batch.
# NOTE(review): the comprehension variable `i` shadows the loop index above.
fake = sample_fake(BATCH_SIZE)
pred = np.array([(sigmoid(teacher[i](fake))).cpu().detach().numpy() for i in range(n_teachers)])
fake.shape
pred.shape
| GAN MNIST/from nd/PATE GAN based on DCGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 1
# +
# Read the altitude and pick a landing instruction.
n = int(input("Enter the current altitude in ft : "))
# Thresholds (ft): <= 1000 land, (1000, 5000) descend, >= 5000 abort.
if n <= 1000:
    print("Safe to land")
elif n < 5000:
    print("Bring down to 1000 ft")
else:
    print("Turn around")
# -
# # Assignment 2
# Print every prime in [2, 200].
# Improvement: trial-divide only up to sqrt(n) (the original tested every
# divisor below n) and use for/else instead of a manual flag variable.
for n in range(1, 201):
    if n > 1:
        for d in range(2, int(n ** 0.5) + 1):
            if n % d == 0:
                break
        else:
            # No divisor found -> n is prime.
            print(n)
| Assignments Day 3_Python-B7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/danielsoy/ALOCC-CVPR2018/blob/master/alibinetOD_condensada.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="VMmftWsoNT9e"
# https://youtu.be/Pql6ShORpNU
# + [markdown] id="so2Yjw_kNT9g"
#
# <br>
# Outlier detection using alibi-detect<br>
# Alibi Detect is an open source Python library focused on outlier, adversarial and drift detection. <br>
# The package aims to cover both online and offline detectors for tabular data, text, <br>
# images and time series. The outlier detection methods should allow the user to <br>
# identify global, contextual and collective outliers.<br>
# pip install alibi-detect<br>
# https://github.com/SeldonIO/alibi-detect<br>
# Documentation: https://docs.seldon.io/_/downloads/alibi-detect/en/v0.5.1/pdf/<br>
# We will be using VAE based outlier detection. Based on this paper:<br>
# https://arxiv.org/pdf/1312.6114.pdf<br>
# <br>
# The Variational Auto-Encoder (VAE) outlier detector is first trained on a batch <br>
# of unlabeled, but normal (inlier) data. Unsupervised training is desirable since <br>
# labeled data is often scarce. The VAE detector tries to reconstruct the input it <br>
# receives. If the input data cannot be reconstructed well, the reconstruction error <br>
# is high and the data can be flagged as an outlier. The reconstruction error is either <br>
# measured as the mean squared error (MSE) between the input and the reconstructed instance <br>
# or as the probability that both the input and the reconstructed instance are <br>
# generated by the same process.<br>
# Data set info: https://openaccess.thecvf.com/content_CVPR_2019/papers/Bergmann_MVTec_AD_--_A_Comprehensive_Real-World_Dataset_for_Unsupervised_Anomaly_CVPR_2019_paper.pdf<br>
# Data set link: https://www.mvtec.com/company/research/datasets/mvtec-ad<br>
#
# + id="FyzVm4tnNT9h"
import os
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="X_yXQi67NT9j" outputId="17aaeea3-72bc-473e-df69-ef18d16df678"
import tensorflow as tf
print(tf.__version__)
# + colab={"base_uri": "https://localhost:8080/"} id="rD_cH7MKR478" outputId="9261991f-d736-4e00-afd7-abb39544626a"
# !git clone https://github.com/SeldonIO/alibi-detect.git
# + colab={"base_uri": "https://localhost:8080/"} id="d12tf8jGTHL1" outputId="5976ff87-ca33-4bc4-8d4f-0972b6137616"
# !pip install alibi
# + id="smbjaerHNT9j"
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Dense, Reshape, InputLayer, Flatten
# + colab={"base_uri": "https://localhost:8080/"} id="CUFQasJJSao9" outputId="d49907c4-a267-4b5a-ee13-68982bdab12c"
# %cd /content/alibi-detect
# + id="ceHHPOpDNT9k"
from alibi_detect.od import OutlierAE, OutlierVAE
from alibi_detect.utils.visualize import plot_instance_score, plot_feature_outlier_image
# + [markdown] id="7U-_4bAVNT9k"
# ########################################################################<br>
# Load data. We only need good data and anything NOT good is an outlier.
# + id="xHVVV_zCT6b9"
from google.colab import drive
# + colab={"base_uri": "https://localhost:8080/"} id="b9cCEQCoLamk" outputId="27383a13-19c2-4bc5-bd5e-053f4877893f"
drive.mount('/content/drive')
# + id="Wb5lhRd5NT9k"
# Training folder of the MVTec "screw" class (defect-free images only).
image_directory = '/content/drive/MyDrive/mvtec_dataset/screw/train/'
SIZE = 64  # every image is resized to SIZE x SIZE
dataset = [] #Many ways to handle data, you can use pandas. Here, we are using a list format.
# + id="KUrGuS3oNT9k"
# Load every good (inlier) PNG, resize it, and append it to `dataset`.
good_images = os.listdir(image_directory + 'good/')
for i, image_name in enumerate(good_images):
    # BUG FIX: the original used image_name.split('.')[1] == 'png', which
    # raises IndexError on names without a dot and misreads multi-dot names.
    if image_name.endswith('.png'):
        image = cv2.imread(image_directory + 'good/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))
# + id="0ThKRHwoNT9l"
dataset = np.array(dataset)
# + id="aLqOw3E8NT9l"
# First 200 good images for training; the rest for threshold inference.
train = dataset[0:200]
test = dataset[200:279]
# + id="MTLDlVUGNT9l"
# Scale pixel values to [0, 1].
train = train.astype('float32') / 255.
test = test.astype('float32') / 255.
# + [markdown] id="z8iIrwbpNT9m"
# Let us also load bad images to verify our trained model.
# + id="UMq4Vzu1NT9m"
# Load the defective (outlier) PNGs used only for verification.
bad_images = os.listdir(image_directory + 'bad')
bad_dataset=[]
for i, image_name in enumerate(bad_images):
    # BUG FIX: endswith() instead of split('.')[1] — no crash on dot-less
    # names and correct handling of multi-dot filenames.
    if image_name.endswith('.png'):
        image = cv2.imread(image_directory + 'bad/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        bad_dataset.append(np.array(image))
bad_dataset = np.array(bad_dataset)
bad_dataset = bad_dataset.astype('float32') / 255.
# + [markdown] id="p-qGseI2NT9m"
# #######################################################################<br>
# Define the encoder - decoder network for input to the OutlierVAE detector class. <br>
# Can be any encoder and decoder.
# + id="pHr5tyNCNT9m"
encoding_dim = 1024 #Dimension of the bottleneck encoder vector.
dense_dim = [8, 8, 512] #Dimension of the last conv. output. This is used to work our way back in the decoder.
# + [markdown] id="_0dXov9HNT9n"
# Define encoder
# + id="wNchdaFmNT9n"
# Three stride-2 convolutions (64x64 -> 8x8 spatial) then a dense bottleneck.
encoder_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=train[0].shape),
        Conv2D(64, 4, strides=2, padding='same', activation=tf.nn.relu),
        Conv2D(128, 4, strides=2, padding='same', activation=tf.nn.relu),
        Conv2D(512, 4, strides=2, padding='same', activation=tf.nn.relu),
        Flatten(),
        Dense(encoding_dim,)
    ])
# + colab={"base_uri": "https://localhost:8080/"} id="SOeo4xJjNT9n" outputId="b35a734b-f09d-4049-9a66-41a63b55bfd8"
print(encoder_net.summary())
#print(encoder_net.input_shape)
# + [markdown] id="Q-DG2AMTNT9n"
# Define the decoder. <br>
# Start with the bottleneck dimension (encoder vector) and connect to dense layer <br>
# with dim = total nodes in the last conv. in the encoder.
# + id="xJ2WTL1iNT9n"
# Mirror of the encoder: dense -> reshape -> three stride-2 transposed convs.
decoder_net = tf.keras.Sequential(
    [
        InputLayer(input_shape=(encoding_dim,)),
        Dense(np.prod(dense_dim)),
        Reshape(target_shape=dense_dim),
        Conv2DTranspose(256, 4, strides=2, padding='same', activation=tf.nn.relu),
        Conv2DTranspose(64, 4, strides=2, padding='same', activation=tf.nn.relu),
        Conv2DTranspose(3, 4, strides=2, padding='same', activation='sigmoid')
    ])
# + colab={"base_uri": "https://localhost:8080/"} id="-edSGSokNT9o" outputId="de7c266a-3562-4a65-d408-397f408cd635"
print(decoder_net.summary())
#print(decoder_net.input_shape)
# + [markdown] id="R8gChNT6NT9o"
# #####################################################################<br>
# Define and train the outlier detector.
# + id="NGeb6s0RNT9o"
latent_dim = 1024 #(Same as encoding dim. )
# + [markdown] id="8tDBrcdJNT9o"
# initialize outlier detector
# + id="rzN7UcHGNT9o"
od = OutlierVAE(threshold=.015,  # threshold for outlier score above which the element is flagged as an outlier.
                score_type='mse',  # use MSE of reconstruction error for outlier detection
                encoder_net=encoder_net,  # can also pass VAE model instead
                decoder_net=decoder_net,  # of separate encoder and decoder
                latent_dim=latent_dim,
                samples=4)
# + colab={"base_uri": "https://localhost:8080/"} id="lD7FbfhGNT9p" outputId="ab3bd753-3f1e-4063-a48c-380779a63677"
print("Current threshold value is: ", od.threshold)
# + [markdown] id="BadWsGJUNT9p"
# train<br>
# from alibi_detect.models.tensorflow.losses import elbo #evidence lower bound loss
# + colab={"base_uri": "https://localhost:8080/"} id="NsVPLQstNT9p" outputId="3ff01913-fce0-47d3-8a97-e33c296bdef7"
# FIX: `lr=` is deprecated in tf.keras optimizers; use `learning_rate=`.
adam = tf.keras.optimizers.Adam(learning_rate=1e-4)
# + colab={"base_uri": "https://localhost:8080/"} id="bguIxvZwNT9p" outputId="9928c7e9-8b79-4ce0-e8c1-c0dca2224fce"
# Train the VAE on good (inlier) images only.
od.fit(train,
       optimizer = adam,
       epochs=20,
       batch_size=4,
       verbose=True)
# + [markdown] id="Yn3V6alWNT9p"
# Check the threshold value. Should be the same as defined before.
# + colab={"base_uri": "https://localhost:8080/"} id="o3ANxCyXNT9p" outputId="cbfb8157-d02c-4f91-b847-9ffbdecc2abc"
print("Current threshold value is: ", od.threshold)
# + [markdown] id="O-G-1_fKNT9q"
# <br>
# infer_threshold updates the threshold by a value inferred from the percentage of <br>
# instances considered to be outliers in a sample of the dataset.<br>
# Percentage of X considered to be normal based on the outlier score.<br>
# Here, we set it to 99%
# + colab={"base_uri": "https://localhost:8080/"} id="boOWYIBNNT9q" outputId="0f4eae86-c0b6-4ca5-a08b-878673e732da"
od.infer_threshold(test, outlier_type='instance', threshold_perc=99.0)
print("Current threshold value is: ", od.threshold)
# + id="IJwqW_bnlViC"
from alibi_detect.utils.saving import save_detector, load_detector
# Save the fitted detector and immediately reload it as a round-trip check.
save_detector(od, "saved_outlier_models/carpet_od_20epochs.h5")
od = load_detector("saved_outlier_models/carpet_od_20epochs.h5")
# + [markdown] id="yEa1GvTBNT9q"
# save the trained outlier detector<br>
# As mentioned in their documentation, save and load is having issues in python3.6 but works fine in 3.7<br>
# from alibi_detect.utils import save_detector, load_detector<br>
# save_detector(od, "saved_outlier_models/carpet_od_20epochs.h5")<br>
# od = load_detector(filepath)
# + [markdown] id="ampt2DiZNT9q"
# Test our model on a bad image
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="LuvadR3dNT9q" outputId="233b9794-5a34-4a73-894a-dc199ca0b978"
img_num = 9
# Add a batch dimension: the detector expects (N, H, W, C).
test_bad_image = bad_dataset[img_num].reshape(1, 64, 64, 3)
plt.imshow(test_bad_image[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="Xq1HkeatNT9r" outputId="6d18d4f4-723b-44f4-8293-942c30bd5e71"
# Reconstruct the image with the trained VAE and display the result.
test_bad_image_recon = od.vae(test_bad_image)
test_bad_image_recon = test_bad_image_recon.numpy()
plt.imshow(test_bad_image_recon[0])
# + id="pLO7EXAMNT9r"
test_bad_image_predict = od.predict(test_bad_image) #Returns a dictionary of data and metadata
# + [markdown] id="SazKGeM3NT9r"
# Data dictionary contains the instance_score, feature_score, and whether it is an outlier or not. <br>
# Let us look at the values under the 'data' key in our output dictionary
# + colab={"base_uri": "https://localhost:8080/"} id="X0elwf9DNT9r" outputId="319840e1-ad58-440a-8ea0-3be67a345aed"
bad_image_instance_score = test_bad_image_predict['data']['instance_score'][0]
print("The instance score is:", bad_image_instance_score)
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="KufaDMmfNT9r" outputId="9c35c9e9-6678-4a01-9e19-b8332779cf34"
# Per-pixel reconstruction error map (first channel shown).
bad_image_feature_score = test_bad_image_predict['data']['feature_score'][0]
plt.imshow(bad_image_feature_score[:,:,0])
print("Is this image an outlier (0 for NO and 1 for YES)?", test_bad_image_predict['data']['is_outlier'][0])
# + [markdown] id="LkxjUrN4NT9r"
# You can also manually define the threshold based on your specific use case.
# + colab={"base_uri": "https://localhost:8080/"} id="GwEPXK9CNT9r" outputId="a6f53a34-aea7-48f0-965d-b5de4de5c866"
od.threshold = 0.002
# Typo fix in the printed message: "threshld" -> "threshold".
print("Current threshold value is: ", od.threshold)
# + [markdown] id="AmCiIQuBNT9s"
# Let us check it for multiple images
# + id="OyurkNvdNT9s"
X = bad_dataset[:20]
# + id="sPeiGFiBNT9s"
# Score a batch of known-bad images at once.
od_preds = od.predict(X,
                      outlier_type='instance',    # use 'feature' or 'instance' level
                      return_feature_score=True,  # scores used to determine outliers
                      return_instance_score=True)
# + colab={"base_uri": "https://localhost:8080/"} id="V4wRfKinNT9s" outputId="1823f21c-2aea-4dbd-d001-1d5445b52a28"
print(list(od_preds['data'].keys()))
# + [markdown] id="53Jc5bBlNT9s"
# Scatter plot of instance scores, using the built-in function for the scatterplot.
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="ypSwycnkNT9s" outputId="fa035086-bbda-4a86-e5bb-337d1a1c5cad"
target = np.ones(X.shape[0],).astype(int)  # Ground truth (all ones for bad images)
labels = ['normal', 'outlier']
plot_instance_score(od_preds, target, labels, od.threshold) #pred, target, labels, threshold
# + [markdown] id="bE9WNTtcNT9s"
# Plot features for select images, using the built in function (plot_feature_outlier_image)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="N6FJarQrNT9s" outputId="73981be2-51c9-4508-c762-9fc0b06ce125"
# Reconstructions for the whole batch, then side-by-side comparison plots.
X_recon = od.vae(X).numpy()
plot_feature_outlier_image(od_preds,
                           X,
                           X_recon=X_recon,
                           instance_ids=[0, 5, 10, 15, 17],  # pass a list with indices of instances to display
                           max_instances=5,  # max nb of instances to display
                           outliers_only=False)  # only show outlier predictions
# + [markdown] id="WCEk9TuoNT9t"
# #####################################
| alibinetOD_condensada.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 19 11:45:20 2016
@author: johnguttag
"""
import pylab, random
import warnings
warnings.filterwarnings("ignore")
# ### Load Data
class Passenger(object):
    """One Titanic passenger: one-hot cabin class, age and gender features
    plus a survival label ('Survived' / 'Died')."""
    featureNames = ('C1', 'C2', 'C3', 'age', 'male gender')

    def __init__(self, pClass, age, gender, survived, name):
        self.name = name
        # One-hot encode the cabin class, then append age and gender.
        one_hot = [0, 0, 0]
        one_hot[pClass - 1] = 1
        self.featureVec = one_hot + [age, gender]
        self.label = survived
        self.cabinClass = pClass

    def distance(self, other):
        # Euclidean (Minkowski order-2) distance between feature vectors.
        return minkowskiDist(self.featureVec, other.featureVec, 2)

    def getClass(self):
        return self.cabinClass

    def getAge(self):
        return self.featureVec[3]

    def getGender(self):
        return self.featureVec[4]

    def getName(self):
        return self.name

    def getFeatures(self):
        # A copy, so callers cannot mutate the internal vector.
        return list(self.featureVec)

    def getLabel(self):
        return self.label
def getTitanicData(fname):
    """Parse the comma-separated Titanic passenger file into parallel lists.

    Each line: class,age,gender(M/F),survived(1/0),name parts...
    Returns a dict with keys 'class' (int), 'age' (float), 'gender'
    (1 = male, 0 = female), 'survived' ('Survived'/'Died') and 'name'
    (remaining comma-separated fields, newline preserved).
    """
    data = {}
    data['class'], data['survived'], data['age'] = [], [], []
    data['gender'], data['name'] = [], []
    # BUG FIX: the original opened the file and never closed it; `with`
    # guarantees the handle is released even if a line fails to parse.
    with open(fname) as f:
        for line in f:
            split = line.split(',')
            data['class'].append(int(split[0]))
            data['age'].append(float(split[1]))
            data['gender'].append(1 if split[2] == 'M' else 0)
            data['survived'].append('Survived' if split[3] == '1' else 'Died')
            data['name'].append(split[4:])
    return data
# +
def buildTitanicExamples(fileName):
    """Read the passenger file and wrap each record in a Passenger object."""
    data = getTitanicData(fileName)
    examples = []
    for i in range(len(data['class'])):
        p = Passenger(data['class'][i], data['age'][i],
                      data['gender'][i], data['survived'][i],
                      data['name'][i])
        examples.append(p)
    # Typo fix in the status message: "Finishe" -> "Finished".
    print('Finished processing', len(examples), 'passengers\n')
    return examples

examples = buildTitanicExamples('TitanicPassengers.txt')
# -
# ### K-means
def minkowskiDist(v1, v2, p):
    """Assumes v1 and v2 are equal-length arrays of numbers
    Returns Minkowski distance of order p between v1 and v2"""
    # Sum |a-b|^p pairwise, then take the p-th root.
    return sum(abs(a - b) ** p for a, b in zip(v1, v2)) ** (1 / p)
def findNearest(name, exampleSet, metric):
    """Return the example in exampleSet closest, per `metric`, to the one
    whose getName() equals `name`.

    Assumes `name` occurs in exampleSet and the set has another element.
    """
    for e in exampleSet:
        if e.getName() == name:
            example = e
            break
    # Improvements over the original: compare with `is None` (not `== None`)
    # and evaluate the metric once per candidate (the original recomputed it
    # a second time after every improvement).
    nearest, curDist = None, None
    for e in exampleSet:
        if e.getName() != name:
            dist = metric(example, e)
            if curDist is None or dist < curDist:
                nearest = e
                curDist = dist
    return nearest
# +
def accuracy(truePos, falsePos, trueNeg, falseNeg):
    """Fraction of all predictions that were correct."""
    correct = truePos + trueNeg
    total = correct + falsePos + falseNeg
    return correct / total
def sensitivity(truePos, falseNeg):
    """True-positive rate; NaN when there are no actual positives."""
    denom = truePos + falseNeg
    if denom == 0:
        return float('nan')
    return truePos / denom
def specificity(trueNeg, falsePos):
    """True-negative rate; NaN when there are no actual negatives."""
    denom = trueNeg + falsePos
    if denom == 0:
        return float('nan')
    return trueNeg / denom
def posPredVal(truePos, falsePos):
    """Positive predictive value (precision); NaN when nothing was predicted positive."""
    denom = truePos + falsePos
    if denom == 0:
        return float('nan')
    return truePos / denom
def negPredVal(trueNeg, falseNeg):
    """Negative predictive value; NaN when nothing was predicted negative."""
    denom = trueNeg + falseNeg
    if denom == 0:
        return float('nan')
    return trueNeg / denom
# -
def getStats(truePos, falsePos, trueNeg, falseNeg, toPrint = True):
    """Compute (accuracy, sensitivity, specificity, PPV) and optionally print them."""
    stats = (accuracy(truePos, falsePos, trueNeg, falseNeg),
             sensitivity(truePos, falseNeg),
             specificity(trueNeg, falsePos),
             posPredVal(truePos, falsePos))
    if toPrint:
        labels = (' Accuracy =', ' Sensitivity =',
                  ' Specificity =', ' Pos. Pred. Val. =')
        for text, value in zip(labels, stats):
            print(text, round(value, 3))
    return stats
def findKNearest(example, exampleSet, k):
    """Return (kNearest, distances): the k examples in exampleSet closest to
    `example` under example.distance(), in no particular order.

    Note: on distance ties, which neighbor is replaced depends on list order
    (distances.index picks the first maximum), so this is order-sensitive.
    """
    kNearest, distances = [], []
    #Build lists containing first k examples and their distances
    for i in range(k):
        kNearest.append(exampleSet[i])
        distances.append(example.distance(exampleSet[i]))
    maxDist = max(distances) #Get maximum distance
    #Look at examples not yet considered
    for e in exampleSet[k:]:
        dist = example.distance(e)
        if dist < maxDist:
            #replace farther neighbor by this one
            maxIndex = distances.index(maxDist)
            kNearest[maxIndex] = e
            distances[maxIndex] = dist
            maxDist = max(distances)
    return kNearest, distances
def KNearestClassify(training, testSet, label, k):
    """Assumes training & testSet lists of examples, k an int
    Predicts whether each example in testSet has label
    Returns number of true positives, false positives,
    true negatives, and false negatives"""
    truePos, falsePos, trueNeg, falseNeg = 0, 0, 0, 0
    for testCase in testSet:
        nearest, distances = findKNearest(testCase, training, k)
        #conduct vote
        # Strict majority (> k//2) of the k neighbors must carry `label`;
        # an exact tie counts as "not label".
        numMatch = 0
        for i in range(len(nearest)):
            if nearest[i].getLabel() == label:
                numMatch += 1
        if numMatch > k//2: #guess label
            if testCase.getLabel() == label:
                truePos += 1
            else:
                falsePos += 1
        else: #guess not label
            if testCase.getLabel() != label:
                trueNeg += 1
            else:
                falseNeg += 1
    return truePos, falsePos, trueNeg, falseNeg
def leaveOneOut(examples, method, toPrint = True):
    """Leave-one-out evaluation: for each example, train `method` on all the
    others and test on the held-out one. Returns the summed
    (truePos, falsePos, trueNeg, falseNeg) over all folds."""
    totals = [0, 0, 0, 0]
    for idx in range(len(examples)):
        held_out = examples[idx]
        remainder = examples[:idx] + examples[idx+1:]
        outcome = method(remainder, [held_out])
        for pos in range(4):
            totals[pos] += outcome[pos]
    if toPrint:
        getStats(*totals)
    return tuple(totals)
def split80_20(examples):
    """Randomly hold out 20% of `examples` as a test set.
    Returns (trainingSet, testSet)."""
    held_out = set(random.sample(range(len(examples)),
                                 len(examples)//5))
    trainingSet, testSet = [], []
    for idx, example in enumerate(examples):
        target = testSet if idx in held_out else trainingSet
        target.append(example)
    return trainingSet, testSet
def randomSplits(examples, method, numSplits, toPrint = True):
    """Average `method` over numSplits random 80/20 splits.
    Returns mean (truePos, falsePos, trueNeg, falseNeg); note the averages
    are floats, and getStats is called on them for the printed summary."""
    truePos, falsePos, trueNeg, falseNeg = 0, 0, 0, 0
    # Fixed seed so repeated runs use the same sequence of splits.
    random.seed(0)
    for t in range(numSplits):
        trainingSet, testSet = split80_20(examples)
        results = method(trainingSet, testSet)
        truePos += results[0]
        falsePos += results[1]
        trueNeg += results[2]
        falseNeg += results[3]
    getStats(truePos/numSplits, falsePos/numSplits,
             trueNeg/numSplits, falseNeg/numSplits, toPrint)
    return truePos/numSplits, falsePos/numSplits,\
           trueNeg/numSplits, falseNeg/numSplits
# k-NN classifier (k=3) predicting the 'Survived' label, in the
# (training, testSet) -> counts shape expected by the evaluators.
knn = lambda training, testSet:\
          KNearestClassify(training, testSet,
                           'Survived', 3)
# +
numSplits = 10
print('Average of', numSplits,
      '80/20 splits using KNN (k=3)')
truePos, falsePos, trueNeg, falseNeg =\
         randomSplits(examples, knn, numSplits)
print('Average of LOO testing using KNN (k=3)')
truePos, falsePos, trueNeg, falseNeg =\
         leaveOneOut(examples, knn)
# -
# ### Logistic Regression
# +
import sklearn.linear_model
def buildModel(examples, toPrint = True):
    """Fit a logistic-regression model to the examples' feature vectors.
    When toPrint is True, also print the classes and the coefficients
    (reported for classes_[1]). Returns the fitted sklearn model."""
    featureVecs, labels = [],[]
    for e in examples:
        featureVecs.append(e.getFeatures())
        labels.append(e.getLabel())
    LogisticRegression = sklearn.linear_model.LogisticRegression
    model = LogisticRegression().fit(featureVecs, labels)
    if toPrint:
        print('model.classes_ =', model.classes_)
        for i in range(len(model.coef_)):
            print('For label', model.classes_[1])
            for j in range(len(model.coef_[0])):
                print('   ', Passenger.featureNames[j], '=',
                      model.coef_[0][j])
    return model
# -
def applyModel(model, testSet, label, prob = 0.5):
    """Classify testSet with `model`, predicting `label` when the model's
    probability for class index 1 exceeds `prob`.
    Returns (truePos, falsePos, trueNeg, falseNeg)."""
    featureVecs = [e.getFeatures() for e in testSet]
    probs = model.predict_proba(featureVecs)
    truePos = falsePos = trueNeg = falseNeg = 0
    for example, p in zip(testSet, probs):
        predicted_positive = p[1] > prob
        actually_positive = example.getLabel() == label
        if predicted_positive and actually_positive:
            truePos += 1
        elif predicted_positive:
            falsePos += 1
        elif not actually_positive:
            trueNeg += 1
        else:
            falseNeg += 1
    return truePos, falsePos, trueNeg, falseNeg
def lr(trainingData, testData, prob = 0.5):
    """Train logistic regression on trainingData and evaluate on testData.
    Returns (truePos, falsePos, trueNeg, falseNeg)."""
    fitted = buildModel(trainingData, False)
    return applyModel(fitted, testData, 'Survived', prob)
# ### Compare Models
# Evaluate logistic regression with the same protocols used for k-NN.
random.seed(0)
numSplits = 10
print('Average of', numSplits, '80/20 splits LR')
truePos, falsePos, trueNeg, falseNeg =\
         randomSplits(examples, lr, numSplits)
print('Average of LOO testing using LR')
truePos, falsePos, trueNeg, falseNeg =\
         leaveOneOut(examples, lr)
#Look at weights
trainingSet, testSet = split80_20(examples)
model = buildModel(trainingSet, True)
#Look at changing prob
# Re-seed so this split is reproducible independently of the runs above.
random.seed(0)
trainingSet, testSet = split80_20(examples)
model = buildModel(trainingSet, False)
print('Try p = 0.1')
truePos, falsePos, trueNeg, falseNeg =\
         applyModel(model, testSet, 'Survived', 0.1)
getStats(truePos, falsePos, trueNeg, falseNeg)
print('Try p = 0.9')
truePos, falsePos, trueNeg, falseNeg =\
         applyModel(model, testSet, 'Survived', 0.9)
getStats(truePos, falsePos, trueNeg, falseNeg)
def buildROC(trainingSet, testSet, title, plot = True):
    """Sweep the decision threshold from 0 to 1 in 0.01 steps, plot the ROC
    curve (if `plot`) and return the AUROC."""
    # FIX: the file only imports sklearn.linear_model, so sklearn.metrics
    # being resolvable was an accident of sklearn's internal imports; make
    # the dependency explicit here.
    import sklearn.metrics
    model = buildModel(trainingSet, True)
    xVals, yVals = [], []
    p = 0.0
    while p <= 1.0:
        truePos, falsePos, trueNeg, falseNeg =\
                         applyModel(model, testSet,
                                    'Survived', p)
        xVals.append(1.0 - specificity(trueNeg, falsePos))
        yVals.append(sensitivity(truePos, falseNeg))
        p += 0.01
    # NOTE(review): the third positional argument (reorder=True) was removed
    # in newer scikit-learn releases — confirm the pinned sklearn version.
    auroc = sklearn.metrics.auc(xVals, yVals, True)
    if plot:
        pylab.plot(xVals, yVals)
        pylab.plot([0,1], [0,1])
        title = title + '\nAUROC = ' + str(round(auroc,3))
        pylab.title(title)
        pylab.xlabel('1 - specificity')
        pylab.ylabel('Sensitivity')
    return auroc
# Draw the ROC curve for a single reproducible 80/20 split.
random.seed(0)
trainingSet, testSet = split80_20(examples)
buildROC(trainingSet, testSet, 'ROC for Predicting Survival, 1 Split')
| models/logistic_regression/titanic_k_means_logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.environ['CUDA_VISIBLE_DEVICE'] = ''
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# %load_ext autoreload
# %autoreload 2
from importlib.util import find_spec
if find_spec("text_recognizer") is None:
import sys
sys.path.append('..')
from text_recognizer.data import IAMLines
# -
# !ulimit -n 65000
# BUG FIX: `argparse` is used below but was never imported anywhere in this
# notebook, which raises NameError at run time.
import argparse

# Build the IAM lines dataset without augmentation and print its summary.
dataset = IAMLines(argparse.Namespace(augment_data='false'))
dataset.prepare_data()
dataset.setup()
print(dataset)
# +
def convert_y_label_to_string(y, dataset=dataset):
    """Map a sequence of integer labels back to text via dataset.mapping,
    skipping token id 3 (treated as padding here)."""
    characters = (dataset.mapping[token] for token in y if token != 3)
    return ''.join(characters)

convert_y_label_to_string(dataset.data_train[0][1])
# -
# Grab one training batch and display the first 10 line images with their
# decoded transcriptions as titles.
X, Y = next(iter(dataset.train_dataloader()))
for i in range(10):
    plt.figure(figsize=(20, 20))
    x, y = X[i], Y[i]
    sentence = convert_y_label_to_string(y)
    plt.matshow(x.squeeze(), cmap='gray', vmin=0, vmax=1)
    plt.title(sentence)
| lab9/notebooks/03-look-at-iam-lines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 4 - Transmon qubit
# ### Importing Packages
# +
# Import helper module from local folder
import sys
import os
sys.path.append(os.getcwd())
from resources import helper
# Numerical and plotting tools
import numpy as np
import matplotlib.pyplot as plt
# Import SI unit conversion factors
from resources.helper import GHz, MHz, kHz, us, ns
# Importing standard Qiskit libraries
from qiskit import IBMQ
from qiskit.tools.jupyter import *
from qiskit import pulse
from qiskit.pulse import Play, Schedule, DriveChannel
from qiskit.pulse import DriveChannel, Gaussian
from qiskit.tools.monitor import job_monitor
from resources.helper import SpecFitter
from qc_grader import grade_ex4
# -
# #### Account Setting
# Loading your IBM Quantum account
# Loading your IBM Quantum account
IBMQ.load_account()
IBMQ.providers() # see a list of providers you have access to
# +
# Get the special provider assigned to you using information from the output above
hub_name = 'iqc2021-4' # e.g. 'iqc2021-1'
group_name = 'challenge-124' # e.g. 'challenge-1'
project_name = 'ex4' # Your project name should be 'ex4'
provider = IBMQ.get_provider(hub=hub_name, group=group_name, project=project_name)
# Get `ibmq_jakarta` backend from the provider
backend_name = 'ibmq_jakarta'
backend = provider.get_backend(backend_name)
backend # See details of the `ibmq_jakarta` quantum system
# -
# #### --------------------------------------------------------------------------------------------------------------------
# ## 1. Instantiate channels and retrieve measurement schedule
#
# We will use the same measurement schedule throughout, whereas the drive schedules will vary. For any backend, we can ask for its default measurement pulse instead of calibrating it separately.
# Please use qubit 0 throughout the notebook
qubit = 0
# Next, save the backend configuration and the sampling time $dt$. We will exclude the `AcquireChannel`s when plotting for clarity.
backend_config = backend.configuration()
exc_chans = helper.get_exc_chans(globals())
# dt is the hardware sampling period in seconds.
dt = backend_config.dt
print(f"Sampling time: {dt*1e9} ns")
# The `instruction_schedule_map` provides the parameters of the default calibrated pulses used by the backend. In particular, we see that this "native gate set" consists of a subset of operations you can perform on a [`QuantumCircuit`](https://qiskit.org/documentation/apidoc/circuit.html#gates-and-instructions), and is the gate set that the `QuantumCircuit` is transpiled to by Qiskit.
backend_defaults = backend.defaults()
center_frequency = backend_defaults.qubit_freq_est
inst_sched_map = backend_defaults.instruction_schedule_map
inst_sched_map.instructions
# Retrieve calibrated measurement pulse from backend
meas = inst_sched_map.get('measure', qubits=[qubit])
meas.exclude(channels=exc_chans).draw(time_range=[0,1000])
# Total duration (in dt samples) of the default measurement schedule.
asl = inst_sched_map.get('measure', qubits=[qubit]).duration
asl
# ### 1. |0> - |1>
# +
# The same spec pulse for both 01 and 12 spec
drive_amp = 0.25
drive_duration = inst_sched_map.get('x', qubits=[qubit]).duration
# Calibrated backend pulse use advanced DRAG pulse to reduce leakage to the |2> state.
# Here we will use simple Gaussian pulse
drive_sigma = drive_duration // 4 # DRAG pulses typically 4*sigma long.
spec_pulse = Gaussian(duration=drive_duration, amp=drive_amp,
                      sigma=drive_sigma, name=f"Spec drive amplitude = {drive_amp}")
# Construct an np array of the frequencies for our experiment
spec_freqs_GHz = helper.get_spec01_freqs(center_frequency, qubit)
# Create the base schedule
# Start with drive pulse acting on the drive channel
# One schedule per probe frequency: set the drive frequency, play the
# Gaussian spec pulse, then measure.
spec01_scheds = []
for freq in spec_freqs_GHz:
    with pulse.build(name="Spec Pulse at %.3f GHz" % freq) as spec01_sched:
        with pulse.align_sequential():
            # Pay close attention to this part to solve the problem at the end
            pulse.set_frequency(freq*GHz, DriveChannel(qubit))
            pulse.play(spec_pulse, DriveChannel(qubit))
            pulse.call(meas)
    spec01_scheds.append(spec01_sched)
# Draw spec01 schedule
spec01_scheds[-1].exclude(channels=exc_chans).draw(time_range=[0,1000])
# +
from qiskit.tools.monitor import job_monitor
# Run the job on a real backend
spec01_job = backend.run(spec01_scheds, job_name="Spec 01", **helper.job_params)
print(spec01_job.job_id())
job_monitor(spec01_job)
# If the queuing time is too long, you can save the job id
# and retrieve the job after it's done.
# Replace 'JOB_ID' with your job id and uncomment the line below
#spec01_job = backend.retrieve_job('JOB_ID')
# -
# ### Fit the Spectroscopy Data
#
# We will fit the spectroscopy signal to a *Lorentzian* function of the form
#
# $$ \frac{AB}{\pi[(f-f_{01})^2 + B^2]} + C $$
#
# to find the qubit frequency $f_{01}$ with these fitting parameters:
#
#
# Parameter | Corresponds to
# --- | ---
# $A$ | amplitude
# $f_{01}$ | 01 frequency guess (GHz)
# $B$ | scale
# $C$ | offset
#
#
# We will use the `SpecFitter` from the `helper` module that is based on the fitters from `qiskit.ignis.characterization.fitters` library.
#
# <div class="alert alert-block alert-danger">
#
# **Note:** You may need to modify the fitting parameters below to get a good fit.
#
# </div>
# +
from resources.helper import SpecFitter

# Initial guesses for the Lorentzian fit: amplitude, f01 (GHz), scale, offset.
amp_guess = 5e6
f01_guess = 5
B = 1
C = 0
fit_guess = [amp_guess, f01_guess, B, C]
fit = SpecFitter(spec01_job.result(), spec_freqs_GHz, qubits=[qubit], fit_p0=fit_guess)
fit.plot(0, series='z')
f01 = fit.spec_freq(0, series='z')
print("Spec01 frequency is %.6f GHz" % f01)
# -
# Let's compare your result with the calibrated qubit frequency from the backend! If things are working properly, your result should be very close to the calibrated value ($\pm$1 MHz).
# Retrieve qubit frequency from backend properties
f01_calibrated = backend.properties().frequency(qubit) / GHz
f01_error = abs(f01-f01_calibrated) * 1000 # error in MHz
print("Qubit frequency error is %.6f MHz" % f01_error)
# #### --------------------------------------------------------------------------------------------------------------------
# ### 2. Calibrate X-180 pulse amplitude using Rabi oscillation
# +
# Sweep Gaussian drive amplitudes at the fitted qubit frequency f01 to
# observe Rabi oscillations.
max_rabi_amp = 0.75
rabi_amps = helper.get_rabi_amps(max_rabi_amp)
rabi_scheds = []
for ridx, amp in enumerate(rabi_amps):
    with pulse.build(name="rabisched_%d_0" % ridx) as sched: # '0' corresponds to Rabi
        with pulse.align_sequential():
            pulse.set_frequency(f01*GHz, DriveChannel(qubit))
            rabi_pulse = Gaussian(duration=drive_duration, amp=amp, \
                                  sigma=drive_sigma, name=f"Rabi drive amplitude = {amp}")
            pulse.play(rabi_pulse, DriveChannel(qubit))
            pulse.call(meas)
    rabi_scheds.append(sched)
# Draw rabi schedule
rabi_scheds[-1].exclude(channels=exc_chans).draw(time_range=[0,1000])
# +
# Run the job on a real device
rabi_job = backend.run(rabi_scheds, job_name="Rabi", **helper.job_params)
print(rabi_job.job_id())
job_monitor(rabi_job)
# If the queuing time is too long, you can save the job id
# and retrieve the job after it's done.
# Replace 'JOB_ID' with your job id and uncomment the line below
#rabi_job = backend.retrieve_job('JOB_ID')
# -
# ### Fit the Rabi Data
#
# We will fit the Rabi signal to a sinusoidal function of the form
#
# $$ a \cos(2\pi f x + \phi) + c $$
#
# to find the Rabi period $T = 2\pi/f$ with these fitting parameters:
#
#
# Parameter | Corresponds to
# --- | ---
# $a$ | amplitude
# $f$ | Rabi drive frequency
# $\phi$ | phase offset
# $c$ | offset
#
#
# We will use the `RabiFitter` from the `qiskit.ignis.characterization.calibration.fitters` library.
#
# <div class="alert alert-block alert-danger">
#
# **Note:** You may need to modify the fitting parameters below to get a good fit.
#
# </div>
# +
from qiskit.ignis.characterization.calibrations.fitters import RabiFitter
# Initial fit guesses for the cosine: [amplitude, Rabi frequency, phase, offset].
amp_guess = 5e7
fRabi_guess = 2
phi_guess = 0.5
c_guess = 0
fit_guess = [amp_guess, fRabi_guess, phi_guess, c_guess]
fit = RabiFitter(rabi_job.result(), rabi_amps, qubits=[qubit], fit_p0=fit_guess)
fit.plot(qind=0, series='0')
# Drive amplitude implementing a pi (X-180) rotation, from the fitted period.
x180_amp = fit.pi_amplitude()
print("Pi amplitude is %.3f" % x180_amp)
# -
# #### --------------------------------------------------------------------------------------------------------------------
# ### 3. |1> - |2>
# +
# Define pi pulse using the amplitude calibrated in the Rabi experiment.
x_pulse = Gaussian(duration=drive_duration,
                   amp=x180_amp,
                   sigma=drive_sigma,
                   name='x_pulse')
def build_spec12_pulse_schedule(freq, anharm_guess_GHz):
    """Build a |1>->|2> spectroscopy schedule: pi pulse to |1>, then a probe tone.

    NOTE(review): `anharm_guess_GHz` is only used in the schedule name, and the
    probe is set to `freq*1.5*GHz` rather than being offset by the anharmonicity
    guess — confirm this sideband choice is intended before reusing this code.
    """
    with pulse.build(name="Spec Pulse at %.3f GHz" % (freq+anharm_guess_GHz)) as spec12_schedule:
        with pulse.align_sequential():
            # WRITE YOUR CODE BETWEEN THESE LINES - START
            # Pi pulse at the qubit frequency takes |0> -> |1>.
            pulse.set_frequency(freq*GHz, DriveChannel(qubit))
            pulse.play(x_pulse, DriveChannel(qubit))
            # Probe tone searching for the |1> -> |2> transition.
            pulse.set_frequency(freq*1.5*GHz, DriveChannel(qubit))
            pulse.play(spec_pulse, DriveChannel(qubit))
            pulse.call(meas)
            # WRITE YOUR CODE BETWEEN THESE LINES - END
    return spec12_schedule
# +
anharmonicity_guess_GHz = -0.35 # your anharmonicity guess
# Candidate frequencies around the expected |1>->|2> transition.
freqs_GHz = helper.get_spec12_freqs(f01, qubit)
print(freqs_GHz)
# Now vary the sideband frequency for each spec pulse
spec12_scheds = []
for freq in freqs_GHz:
    spec12_scheds.append(build_spec12_pulse_schedule(freq, anharmonicity_guess_GHz))
# Draw spec12 schedule
spec12_scheds[-1].exclude(channels=exc_chans).draw(time_range=[0,1000])
# -
# ### 4. Backend
# +
# Run the job on a real device
spec12_job = backend.run(spec12_scheds, job_name="Spec 12", **helper.job_params)
print(spec12_job.job_id())
job_monitor(spec12_job)
# If the queuing time is too long, you can save the job id
# and retrieve the job after it's done.
# Replace 'JOB_ID' with your job id and uncomment the line below.
#spec12_job = backend.retrieve_job('JOB_ID')
# -
# ### Measurement
# ### Fit the Spectroscopy Data
#
# <div id='fit-f12'></div>
#
# We will again fit the spectroscopy signal to a Lorentzian function of the form
#
# $$ \frac{AB}{\pi[(f-f_{12})^2 + B^2]} + C $$
#
# to find the frequency of the $|1\rangle \to |2\rangle$ transition $f_{12}$ with these fitting parameters:
#
# Parameter | Corresponds to
# --- | ---
# $A$ | amplitude
# $f_{12}$ | 12 frequency guess (GHz)
# $B$ | scale
# $C$ | offset
#
#
# <div class="alert alert-block alert-danger">
#
# **Note:** You may need to modify the fitting parameters below to get a good fit.
#
# </div>
# +
# Lorentzian fit of the |1>->|2> trace; guesses: [amplitude, f12, scale, offset].
amp_guess = 2e7 #2e7
f12_guess = f01 - 0.2 #0.2
B = 0.1 #0.1
C = 0
fit_guess = [amp_guess, f12_guess, B, C]
# The sweep frequencies are shifted by the anharmonicity guess before fitting.
fit = SpecFitter(spec12_job.result(), freqs_GHz+anharmonicity_guess_GHz, qubits=[qubit], fit_p0=fit_guess)
fit.plot(0, series='z')
f12 = fit.spec_freq(0, series='z')
print("Spec12 frequency is %.6f GHz" % f12)
# +
# NOTE(review): this cell is an exact duplicate of the one above and can be removed.
amp_guess = 2e7 #2e7
f12_guess = f01 - 0.2 #0.2
B = 0.1 #0.1
C = 0
fit_guess = [amp_guess, f12_guess, B, C]
fit = SpecFitter(spec12_job.result(), freqs_GHz+anharmonicity_guess_GHz, qubits=[qubit], fit_p0=fit_guess)
fit.plot(0, series='z')
f12 = fit.spec_freq(0, series='z')
print("Spec12 frequency is %.6f GHz" % f12)
# -
# ### 5. Grading
# Submit the measured |1>->|2> transition frequency for grading.
grade_ex4(f12,qubit,backend_name)
| solutions by participants/ex4/ex4-PritamSinha.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MicroPython - /dev/ttyUSB1
# language: micropython
# name: micropython-ttusb1
# ---
# Motion-sensor demo for a MicroPython board (ESP32, per the kernelspec).
from machine import Pin, ADC
from time import sleep
# Analog input on GPIO35; 11 dB attenuation extends the readable voltage range.
adc = ADC(Pin(35))#Pin(15, Pin.IN))
adc.atten(ADC.ATTN_11DB)
print(adc.read())
# PIR/radar motion sensor on GPIO15 (input); output device (buzzer/LED) on GPIO4.
pin_pir = Pin(15, mode=Pin.IN)
pin_sound = Pin(4, mode=Pin.OUT)
# Quick toggle to verify the output pin responds.
pin_sound.value(0)
pin_sound.value(1)
print(pin_sound.value())
# Poll the sensor for ~100 s (1000 iterations x 0.1 s sleep).
# NOTE(review): the output is driven low while motion is detected and high
# otherwise — this implies an active-low device; confirm against the wiring.
for i in range(1000):
    is_pir = pin_pir.value()
    #print(is_pir)
    if is_pir == 1:
        pin_sound.value(0)
    else:
        pin_sound.value(1)
    sleep(0.1)
# Make sure the output is left de-asserted when the loop ends.
pin_sound.value(0)
print("a")
pin_sound.value(0)
| NOTEBOOKS/RCWL-0516.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Sentiment Analysis (SA) with Pre-trained Language Model (LM)
#
# In this notebook, we demonstrate how to analyze sentiments of IMDB reviews using a pre-trained language model.
#
# - Model definition
# - Data pipeline
# - Training and evaluation
# + [markdown] slideshow={"slide_type": "skip"}
# Now that we've covered some advanced topics, let's go back and show how these techniques can help us even when addressing the comparatively simple problem of classification. In particular, we'll look at the classic problem of sentiment analysis: taking an input consisting of a string of text and classifying its sentiment as positive or negative.
#
# In this notebook, we are going to use GluonNLP to build a sentiment analysis model whose weights are initialized based on a pretrained language model. Using pre-trained language model weights is a common approach for semi-supervised learning in NLP. In order to do a good job with language modeling on a large corpus of text, our model must learn representations that contain information about the structure of natural language. Intuitively, by starting with these good features, vs random features, we're able to converge faster upon a good model for our downstream task.
#
# With GluonNLP, we can quickly prototype the model and it's easy to customize. The building process consists of just three simple steps. For this demonstration we'll focus on movie reviews from the Large Movie Review Dataset, also known as the IMDB dataset. Given a movie, our model will output prediction of its sentiment, which can be positive or negative.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Model Architecture
# + [markdown] slideshow={"slide_type": "notes"}
# We can easily transplant the pre-trained weights, we'll base our model architecture on the pre-trained LM. Following the LSTM layer, we have one representation vector for each word in the sentence. Because we plan to make a single prediction (not one per word), we'll first pool our predictions across time steps before feeding them through a dense layer to produce our final prediction (a single sigmoid output node).
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# <img src='samodel-v3.png' width='250px'>
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# Dense: make predictions
#
# Pooling: downsampling
#
# Embedding: from LM
#
# Encoder: from LM
# + [markdown] slideshow={"slide_type": "skip"}
#
# Specifically, our model represents input words by their embeddings. Following the embedding layer, our model consists of a two-layer LSTM, followed by an average pooling layer, followed by a sigmoid output layer (all illustrated in the figure above)
#
# Thus, given an input sequence, the memory cells in the LSTM layer will produce a representation sequence. This representation sequence is then averaged over all timesteps resulting in a fixed-length sentence representation $h$. Finally, we apply a sigmoid output layer on top of $h$. We’re using the sigmoid because we’re trying to predict if this text has positive or negative sentiment, and a sigmoid activation function squashes the output values to the range [0,1], allowing us to interpret this output as a probability.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Model Definition in GluonNLP
# + attributes={"classes": [], "id": "", "n": "1"} slideshow={"slide_type": "fragment"}
import multiprocessing as mp
import random
import time
import numpy as np
import mxnet as mx
from mxnet import gluon
import gluonnlp as nlp
import utils
# Fix all three RNGs (Python, NumPy, MXNet) for reproducible runs.
random.seed(123)
np.random.seed(123)
mx.random.seed(123)
# + slideshow={"slide_type": "subslide"}
class MeanPoolingLayer(mx.gluon.HybridBlock):
    """Average encoder outputs over time, ignoring padded positions."""

    def __init__(self, prefix=None, params=None):
        super(MeanPoolingLayer, self).__init__(prefix=prefix, params=params)

    def hybrid_forward(self, F, data, valid_length):
        # `data` arrives time-major with shape (T, N, C). Zero out every
        # position beyond each sequence's valid length before summing.
        masked = F.SequenceMask(data,
                                sequence_length=valid_length,
                                use_sequence_length=True)
        summed = F.sum(masked, axis=0)
        # Divide each sequence's sum by its own length -> per-sequence mean.
        lengths = F.expand_dims(valid_length, axis=1)
        return F.broadcast_div(summed, lengths)
# + attributes={"classes": [], "id": "", "n": "11"} slideshow={"slide_type": "subslide"}
class SentimentNet(gluon.HybridBlock):
    """Sentiment classifier: embedding -> encoder -> mean pooling -> Dense(1)."""

    def __init__(self, prefix=None, params=None):
        super(SentimentNet, self).__init__(prefix=prefix, params=params)
        with self.name_scope():
            # Embedding and encoder are filled in later from a pre-trained LM.
            self.embedding = None
            self.encoder = None
            self.agg_layer = MeanPoolingLayer()
            self.output = gluon.nn.HybridSequential()
            with self.output.name_scope():
                # Single logit; sigmoid is applied by the loss / at inference.
                self.output.add(gluon.nn.Dense(1, flatten=False))

    def hybrid_forward(self, F, data, valid_length):
        # data: (T, N) token ids; encoder output: (T, N, C).
        encoded = self.encoder(self.embedding(data))
        pooled = self.agg_layer(encoded, valid_length)
        return self.output(pooled)
# + [markdown] slideshow={"slide_type": "-"}
# ## Hyperparameters and Model Initialization
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Load Pre-trained Language Model
# + slideshow={"slide_type": "fragment"}
# Pre-trained LSTM language model (200-d) trained on wikitext-2; the call
# also returns the vocabulary that matches the embedding layer.
language_model_name = 'standard_lstm_lm_200'
pretrained = True
context = mx.gpu(0)  # assumes a GPU is available — TODO confirm
lm_model, vocab = nlp.model.get_model(name=language_model_name,
                                      dataset_name='wikitext-2',
                                      pretrained=pretrained,
                                      ctx=context)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Create SA model from Pre-trained Model
# + [markdown] slideshow={"slide_type": "skip"}
# In the below code, we first acquire a pre-trained model on the Wikitext-2 dataset using nlp.model.get_model. We then construct a SentimentNet object, which takes as input the embedding layer and encoder of the pre-trained model.
#
# As we employ the pre-trained embedding layer and encoder, **we only need to initialize the output layer** using `net.output.initialize(mx.init.Xavier(), ctx=context)`.
# + cell_style="center" slideshow={"slide_type": "fragment"}
learning_rate = 0.005
batch_size = 16
epochs = 1
net = SentimentNet()
# Transplant the pre-trained LM's embedding and encoder into the classifier.
net.embedding = lm_model.embedding
net.encoder = lm_model.encoder
net.hybridize()
# initialize only the output layer — the rest already carries pre-trained weights
net.output.initialize(mx.init.Xavier(), ctx=context)
loss = gluon.loss.SigmoidBCELoss()
trainer = gluon.Trainer(net.collect_params(),'ftml',
                        {'learning_rate': learning_rate})
# + cell_style="center" slideshow={"slide_type": "subslide"}
print(net)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data Pipeline
# + [markdown] slideshow={"slide_type": "skip"}
# ### Preparation
# The data preprocessing logic depends on English spaCy tokenizer. If you are not running this example on the provided AMI, please add a cell and run the following command:
#
# ```bash
# # # !python -m spacy download en
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ### A Glance at the Dataset
# + slideshow={"slide_type": "fragment"}
# Download (if needed) and load the IMDB train/test splits; each sample
# is a (review text, score) pair with scores on a 1-10 scale.
raw_train_dataset = nlp.data.IMDB(root='data/imdb', segment='train')
raw_test_dataset = nlp.data.IMDB(root='data/imdb', segment='test')
# BUG FIX: the score and the review are displayed as one example, but the
# original printed the train score next to the *test* review. Show both
# fields of the same training sample instead.
print('score:\t', raw_train_dataset[11][1])
print('\nreview:\n\n', raw_train_dataset[11][0])
# + [markdown] slideshow={"slide_type": "subslide"}
# We need to do the following to preprocess the dataset:
#
# - Tokenization
# - Label generation
# - Batching with bucketing
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Tokenization and Label Generation
# + slideshow={"slide_type": "fragment"}
# tokenizer takes as input a string and outputs a list of tokens.
tokenizer = nlp.data.SpacyTokenizer('en')
def preprocess(x):
    """Map one raw (text, score) pair to (token indices, binary label)."""
    raw_text, score = x
    # Reviews are clipped to at most 500 tokens before vocabulary lookup.
    clip_to_500 = nlp.data.ClipSequence(500)
    token_ids = vocab[clip_to_500(tokenizer(raw_text))]
    # Scores above 5 count as positive sentiment (label 1), else negative (0).
    return token_ids, int(score > 5)
def get_length(x):
    """Return the token count of sample `x` (a (tokens, label) pair) as a float."""
    tokens, *_ = x
    return float(len(tokens))
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Preprocess the Dataset with Multi-processing
# + slideshow={"slide_type": "fragment"}
def preprocess_dataset(dataset):
    """Tokenize/label every sample in parallel; also return per-sample lengths."""
    tic = time.time()
    with mp.Pool() as workers:
        # Fan the per-sample work out across processes.
        processed = gluon.data.SimpleDataset(workers.map(preprocess, dataset))
        lengths = gluon.data.SimpleDataset(workers.map(get_length, processed))
    elapsed = time.time() - tic
    print('Done tokenization. Time = {:.2f}s, num sentences = {}'.format(elapsed, len(processed)))
    return processed, lengths
train_dataset, train_data_lengths = preprocess_dataset(raw_train_dataset)
test_dataset, test_data_lengths = preprocess_dataset(raw_test_dataset)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Batchify
# + [markdown] slideshow={"slide_type": "skip"}
# In the following code, we use FixedBucketSampler, which assigns each data sample to a fixed bucket based on its length. The bucket keys are either given or generated from the input sequence lengths and the number of buckets.
# + slideshow={"slide_type": "fragment"}
bucket_num, bucket_ratio = 10, 0.2
# Pad the variable-length token sequences within a batch (also returning
# their true lengths) and stack the labels as float32.
batchify_fn = nlp.data.batchify.Tuple(
    nlp.data.batchify.Pad(axis=0, ret_length=True),
    nlp.data.batchify.Stack(dtype='float32'))
# Group samples of similar length into buckets so little padding is wasted;
# `ratio` scales batch size for sparse buckets.
batch_sampler = nlp.data.sampler.FixedBucketSampler(
    train_data_lengths,
    batch_size=batch_size,
    num_buckets=bucket_num,
    ratio=bucket_ratio,
    shuffle=True)
print(batch_sampler.stats())
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Data Loader
# + slideshow={"slide_type": "fragment"}
# Training uses the bucketing sampler above; evaluation uses plain
# fixed-size batches with no shuffling.
train_dataloader = gluon.data.DataLoader(dataset=train_dataset,
                                         batch_sampler=batch_sampler,
                                         batchify_fn=batchify_fn)
test_dataloader = gluon.data.DataLoader(dataset=test_dataset,
                                        batch_size=batch_size,
                                        shuffle=False,
                                        batchify_fn=batchify_fn)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Training
# + slideshow={"slide_type": "fragment"}
def train(net, context, epochs):
    """Run `epochs` passes over the training set, reporting test metrics each epoch."""
    for epoch in range(epochs):
        # Helpers in utils.py do the actual per-batch work.
        train_loss, throughput = utils.train_one_epoch(
            epoch, trainer, train_dataloader, net, loss, context)
        test_loss, test_acc = utils.evaluate(net, test_dataloader, context)
        print('[Epoch {}] train avg loss {:.6f}, test acc {:.2f}, '
              'test avg loss {:.6f}, throughput {:.2f}K wps'.format(
                  epoch, train_loss, test_acc, test_loss, throughput))
# + slideshow={"slide_type": "fragment"}
train(net, context, epochs)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Evaluate with Reviews
# + slideshow={"slide_type": "fragment"}
# Score a short hand-written review; the sigmoid output is P(positive).
sample = ['This', 'movie', 'is', 'amazing']
test_review = mx.nd.array(vocab[sample], ctx=context)
test_length = mx.nd.array([4], ctx=context)
# The network expects time-major input (T, N), hence the reshape to a column.
net(test_review.reshape(-1, 1), test_length).sigmoid()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Practice
#
# - Try with a negative sample. Does the network correctly predict the sentiment?
# - Try re-initialize the network without pre-trained model. Does pre-trained model provide any advantage?
| 09_sentiment_analysis/sentiment_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import h5py
import hdf5_interface
import matplotlib.pyplot as plt
import requests
from bokeh.plotting import figure, show, output_file, output_notebook
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label, Legend
# +
def hdf5_to_dataframe(hdf5_filename, location_name, panel_name):
    """Load one solar panel's records from an HDF5 file into a DataFrame.

    Parameters
    ----------
    hdf5_filename : str
        Base name of the HDF5 file, without the '.h5' extension.
    location_name : str
        Top-level HDF5 group for the site.
    panel_name : str
        Sub-group holding one dataset per column (e.g. 'Month', 'Energy').

    Returns
    -------
    pandas.DataFrame
        One column per dataset in the panel group; 'Month' cast to int.
    """
    dataframe = pd.DataFrame()
    # BUG FIX: the original never closed the file handle; a context manager
    # releases it even if a lookup below raises.
    with h5py.File('{}.h5'.format(hdf5_filename), 'r') as hdf5_file:
        panel_location = hdf5_file[location_name][panel_name]
        for key in panel_location.keys():
            # Materialize the dataset into memory while the file is still open.
            dataframe[str(key)] = panel_location[str(key)][()]
    dataframe['Month'] = dataframe['Month'].astype(int)
    return dataframe
def daily_to_monthly_energy(file_name, location_name, panel_name):
    """Roll a panel's daily energy records up into monthly totals.

    Returns a DataFrame with integer 'Year', 'Month', 'Energy' columns and an
    'Interpolate' flag (1 if any day in that month was interpolated).

    NOTE(review): the running sum is only flushed when the month changes, so
    the last month in the file never gets a row, the day on which the month
    changes is not added to either month, and the flushed row is labeled with
    the *new* month's Year/Month — confirm whether downstream averaging
    tolerates this before changing the behavior.
    """
    solar_dataframe = hdf5_to_dataframe(file_name, location_name, panel_name)
    new_dataframe = pd.DataFrame(columns = ['Year', 'Month', 'Energy', 'Interpolate'])
    previous_month_tracker = solar_dataframe['Month'][0]
    Sum = 0
    interpolated = 0
    j = 0
    # Arrays are seeded with a NaN placeholder row that is dropped at the end.
    year_array = np.array(np.NaN)
    month_array = np.array(np.NaN)
    interpolation_array = np.array(np.NaN)
    energy_array = np.array(np.NaN)
    for i in range(len(solar_dataframe.index)):
        if solar_dataframe['Month'][i] == previous_month_tracker:
            # Same month: accumulate energy and interpolation counts.
            Sum = Sum + solar_dataframe['Energy'][i]
            interpolated = interpolated + solar_dataframe['Interpolate'][i]
        else:
            # Month changed: emit the accumulated totals and reset.
            year_array = np.append(year_array, solar_dataframe['Year'][i])
            month_array = np.append(month_array, solar_dataframe['Month'][i])
            if interpolated > 0:
                interpolation_array = np.append(interpolation_array, 1)
            else:
                interpolation_array = np.append(interpolation_array, 0)
            energy_array = np.append(energy_array, Sum)
            Sum = 0
            interpolated = 0
            j = j + 1
            previous_month_tracker = solar_dataframe['Month'][i]
    new_dataframe['Energy'] = energy_array.astype(int)
    new_dataframe['Month'] = month_array.astype(int)
    new_dataframe['Year'] = year_array.astype(int)
    new_dataframe['Interpolate'] = interpolation_array.astype(int)
    # Drop the NaN seed row and renumber from 0.
    new_dataframe = new_dataframe.drop(0).reset_index(drop=True)
    #new_dataframe['DC Capacity'][1] = solar_dataframe['DC Capacity'][0]
    #new_dataframe['Location'][1] = solar_dataframe['Location'][0]
    return new_dataframe
# +
def pvwatts_tmy2(lattitude,longitude):
    """Query NREL PVWatts v6 (TMY2 dataset) for modeled output at a coordinate.

    Returns a DataFrame built from the 'outputs' section of the JSON response.
    NOTE(review): 'formt' looks like a typo for 'format'; the call appears to
    work because the API returns JSON here anyway — confirm against the docs
    before renaming the key.
    """
    # Get the data from the PV Watts --TMY2
    list_parameters = {"formt": 'JSON', "api_key": "<KEY>", "system_capacity": 18, "module_type": 0, "losses": 14.08,
                       "array_type": 0, "tilt": 50, "azimuth": 180, "lat": lattitude, "lon": longitude, "dataset": 'tmy2'}
    # timeout added so a stalled API call cannot hang the notebook indefinitely.
    json_response = requests.get("https://developer.nrel.gov/api/pvwatts/v6",
                                 params=list_parameters, timeout=30).json()
    TMY2 = pd.DataFrame(data = json_response['outputs'])
    return TMY2
def pvwatts_tmy3(lattitude,longitude):
    """Query NREL PVWatts v6 (TMY3 dataset) for modeled output at a coordinate.

    Returns a DataFrame built from the 'outputs' section of the JSON response.
    NOTE(review): 'formt' looks like a typo for 'format'; see pvwatts_tmy2.
    """
    # Get the data from the PV Watts --TMY3
    list_parameters = {"formt": 'JSON', "api_key": "<KEY>", "system_capacity": 18, "module_type": 0, "losses": 14.08,
                       "array_type": 0, "tilt": 50, "azimuth": 180, "lat": lattitude, "lon": longitude, "dataset": 'tmy3'}
    # timeout added so a stalled API call cannot hang the notebook indefinitely.
    json_response = requests.get("https://developer.nrel.gov/api/pvwatts/v6",
                                 params=list_parameters, timeout=30).json()
    TMY3 = pd.DataFrame(data = json_response['outputs'])
    return TMY3
# +
coordinate = pd.DataFrame(columns=['location','latitude','longitude'])
coordinate['location']=['Ambler-Shungnak-Kobuk','Anchorage','Bethel','Chickaloon',
'Deering','Denali Park','Fairbanks','Fort Yukon',
'Galena-Koyukuk-Ruby', 'Homer','Naknek','Noatak',
'Noorvik','Soldotna','Valdez','Wasilla-Palmer']
coordinate['latitude']=[66.995834, 61.193625, 60.794938, 61.823570,
66.069413, 63.537277, 64.838033, 66.571563,
64.782991, 59.652521, 58.728349, 67.570921,
66.836039, 60.486370, 61.128663, 61.582242]
coordinate['longitude']=[ -157.377096, -149.694974, -161.770716, -148.450442,
-162.766760, -150.985453, -147.668970, -145.250173,
-156.744933, -151.536496, -157.017444, -162.967490,
-161.041913, -151.060702, -146.353366, -149.441001]
coordinate
# -
# Module-level read-only handle; supplyinfo() below reads from it repeatedly.
my_file = h5py.File("solar_panel_data_alaska.h5", 'r')
def supplyinfo(file_name):
    '''
    Summarize the Alaska solar systems stored in the HDF5 file.

    input: file_name — base name of the HDF5 file, without the '.h5' extension
    output: a dataframe with 'location', '#of installation', 'average_capacity',
            'average_annual', 'TMY2' and 'TMY3' columns.
    Measured columns come from the HDF5 data; TMY2/TMY3 are PVWatts-modeled
    annual AC output per kW of capacity. This supply information feeds the
    table in the popup figure and the secondary webpage.
    '''
    result = pd.DataFrame(columns = ['location','#of installation','average_capacity',
                                     'average_annual','TMY2','TMY3'])
    for i in range(len(coordinate)):
        location_name = coordinate['location'][i]
        result.loc[i,'location'] = location_name
        location_hdf5 = my_file.get(location_name)
        annual_per_kw = []   # annual energy per kW, one entry per installation
        capacities = []      # DC capacity of each installation
        installations = 0
        for name in location_hdf5:
            installations = installations + 1
            capacity = location_hdf5[name].attrs["DC Capacity"]
            capacities.append(capacity)
            # Panels with daily records (a 'Day' column) must be rolled up to monthly.
            if 'Day' in location_hdf5[name].keys():
                base = daily_to_monthly_energy(file_name, location_name, name)
            else:
                base = hdf5_to_dataframe(file_name, location_name, name)
            base = base.drop(['Year', 'Interpolate'], axis=1)
            # Mean energy per calendar month, summed over the year and
            # normalized by capacity -> annual kWh per kW.
            average = base.groupby('Month').mean()
            summation = np.sum(average, axis=0) / capacity
            annual_per_kw.append(float(summation))
        result.loc[i,'average_capacity'] = np.mean(capacities)
        result.loc[i,'average_annual'] = np.mean(annual_per_kw)
        # BUG FIX: the original wrote this count to a new '#' column, leaving
        # the declared '#of installation' column empty.
        result.loc[i,'#of installation'] = installations
        # Not every location has TMY2 coverage; skip it when the query is empty.
        tmy2 = pvwatts_tmy2(coordinate['latitude'][i], coordinate['longitude'][i])
        if not tmy2.empty:
            result.loc[i,'TMY2'] = tmy2['ac_annual'][0]/18
        result.loc[i,'TMY3'] = pvwatts_tmy3(coordinate['latitude'][i], coordinate['longitude'][i])['ac_annual'][0]/18
    return result
| supply_info.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import wikipedia
import xml.etree.ElementTree as ET
import re
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score, KFold
import xgboost as xgb
from sklearn.metrics import r2_score
# %matplotlib inline
# -
# Per-semester metrics (rows) keyed by term (columns): model r2, gpga, the
# local gpg and the national gpg reference value.
metric_rows = ['r2', 'gpga', 'gpg', 'gpg nacional']
per_semester = {
    '2016.1': [0.08848302137815456, 0.029420825483034936, 0.03125, .256],
    '2016.2': [0.41209630002356856, 0.06694782470318676, 0.13043478260869565, .256],
    '2017.1': [0.5049975994122389, 0.043444155227593345, 0.13793103448275862, .235],
    '2017.2': [0.4913662773787454, 0.06256895263497003, 0.10526315789473684, .235],
    '2018.1': [0.5143317828072063, 0.1178968318575872, 0.2558139534883721, .25],
    '2018.2': [0.4249461246402403, 0.06357722839110463, 0.24, .25],
    '2019.1': [0.4466307579085217, 0.019110479453685797, 0.17647058823529413, .27],
    '2019.2': [0.42061943853895944, 0.0266550842371591, 0.140625, .27],
    '2020.1': [0.48440678836445183, 0.06923028368241625, 0.19415584415584415584, .29],
}
df = pd.DataFrame(per_semester, index=metric_rows)
df
# Transpose so semesters run along the x-axis; the 2016.1 column and the
# r2 row are excluded from the plot.
df.drop('2016.1', axis=1).T.drop('r2', axis=1).plot.line(figsize=(10, 8));
| notebook/gpg-history.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam # Adagrad and RMSprop
# +
# Two 2-D Gaussian point clouds (500 points each) as a binary-classification toy set.
n_pts = 500
np.random.seed(0)
Xa = np.array([np.random.normal(13, 2, n_pts),
               np.random.normal(12, 2, n_pts)]).T
Xb = np.array([np.random.normal(8, 2, n_pts),
               np.random.normal(6, 2, n_pts)]).T
X = np.vstack((Xa, Xb))
# Labels: 0 for the first cloud, 1 for the second, as a column vector.
y = np.matrix(np.append(np.zeros(n_pts), np.ones(n_pts))).T
plt.scatter(X[:n_pts,0], X[:n_pts,1])
plt.scatter(X[n_pts:,0], X[n_pts:,1])
# Single sigmoid neuron on two inputs = logistic-regression perceptron.
model = Sequential()
model.add(Dense(units=1, input_shape=(2,), activation='sigmoid'))
adam=Adam(lr = 0.1 )
model.compile(adam, loss='binary_crossentropy', metrics=['accuracy'])
h=model.fit(x=X, y=y, verbose=1, batch_size=50,epochs=500, shuffle='true')
# NOTE(review): recent Keras stores this metric under 'accuracy', not 'acc' —
# confirm against the installed Keras version.
plt.plot(h.history['acc'])
plt.legend(['accuracy'])
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.plot(h.history['loss'])
plt.legend(['loss'])
plt.title('loss')
plt.xlabel('epoch')
def plot_decision_boundary(X, y, model):
    """Shade the model's predicted score over a grid spanning the data range."""
    # Extend the grid one unit beyond the data in each dimension.
    xs = np.linspace(min(X[:, 0]) - 1, max(X[:, 0]) + 1)
    ys = np.linspace(min(X[:, 1]) - 1, max(X[:, 1]) + 1)
    grid_x, grid_y = np.meshgrid(xs, ys)
    # Flatten the grid into (n_points, 2) rows for the model.
    flat_points = np.c_[grid_x.ravel(), grid_y.ravel()]
    scores = model.predict(flat_points)
    # Reshape predictions back onto the grid and draw filled contours.
    plt.contourf(grid_x, grid_y, scores.reshape(grid_x.shape))
# Visualize the learned decision boundary with the training points on top.
plot_decision_boundary(X, y, model)
plt.scatter(X[:n_pts,0], X[:n_pts,1])
plt.scatter(X[n_pts:,0], X[n_pts:,1])
plot_decision_boundary(X, y, model)
plt.scatter(X[:n_pts,0], X[:n_pts,1])
plt.scatter(X[n_pts:,0], X[n_pts:,1])
# Classify a single hand-picked point and mark it in red.
# NOTE(review): these assignments rebind x and y, clobbering the label vector
# `y` defined above — fine as the last cell, but fragile if cells are re-run.
x = 7.5
y = 5
point = np.array([[x, y]])
prediction = model.predict(point)
plt.plot([x], [y], marker='o', markersize=10, color="red")
print("prediction is: ",prediction)
# -
| venv/3Keras/Perceptron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# # Information Quantities for Decision Tree Induction
# + [markdown] tags=["remove-cell"]
# **CS5483 Data Warehousing and Data Mining**
# ___
# + deletable=false editable=false init_cell=true nbgrader={"cell_type": "code", "checksum": "3107fafbdc76cb005359349c7a880d8b", "grade": false, "grade_id": "init", "locked": true, "schema_version": 3, "solution": false, "task": false} slideshow={"slide_type": "-"}
# %reset -f
# %matplotlib inline
import dit
from dit.shannon import entropy, conditional_entropy, mutual_information
from IPython.display import Math
import matplotlib.pyplot as plt
# -
# In this notebook, we will use the [`dit` package](https://dit.readthedocs.io/en/latest/) to compute some basic information quantities used in the decision tree algorithms. A summary of Shannon's information measures and their relationships are given first.
# ## Information Measures
# The following are the mathematical definitions of *entropy* and *mutual information*:
# \begin{align}
# H(Y) &= E\left[\log \tfrac1{P_{Y}(Y)}\right] && \text{entropy of $Y$}\\
# H(Y|X) &= E\left[\log \tfrac1{P_{Y|X}(Y|X)}\right] && \text{conditional entropy of $Y$ given $X$}\\
# I(X;Y) &= E\left[\log \tfrac{P_{Y|X}(Y|X)}{P_Y(Y)}\right] && \text{mutual information of $X$ and $Y$}\\
# \end{align}
# These information quantities can be related using a *Venn Diagram*:
# <a title="KonradVoelkel, Public domain, via Wikimedia Commons" href="https://commons.wikimedia.org/wiki/File:Entropy-mutual-information-relative-entropy-relation-diagram.svg"><img width="512" alt="Entropy-mutual-information-relative-entropy-relation-diagram" src="https://upload.wikimedia.org/wikipedia/commons/d/d4/Entropy-mutual-information-relative-entropy-relation-diagram.svg"></a>
# \begin{align}
# H(X,Y)&=H(X)+H(Y|X) && \text{chain rule of entropy}\\
# &=H(Y)+H(X|Y)\\
# I(X;Y)&=H(Y)-H(Y|X) && \text{mutual information in terms of entropies}\\
# &=H(X)+H(Y)-H(X,Y)\\
# &=H(X)-H(X|Y)
# \end{align}
# ## Entropy
# Consider the following distribution:
# \begin{align}
# p_k=\begin{cases}
# \frac12 & k=0\\
# \frac14 & k=1,2\\
# 0 & \text{otherwise.}
# \end{cases}
# \end{align}
# +
# Probability mass function p = (1/2, 1/4, 1/4, 0) over outcomes '0'..'3'.
p = dit.Distribution(['0', '1', '2', '3'], [1/2, 1/4, 1/4, 0])
# `use_line_collection=True` was dropped: the keyword became the default
# behaviour in Matplotlib 3.3 and was removed entirely in Matplotlib 3.8,
# where passing it raises a TypeError.
plt.stem(p.outcomes, p.pmf)
plt.xlabel('k')
plt.ylabel(r'$p_k$')
plt.ylim((0, 1))
plt.show()
# -
Math(f'h(p_1,p_2,\dots)={entropy(p)}')
# ## Information Gain
# Consider the dataset $D$:
# |X1|X2|X3|X4|Y|
# |:---:|:---:|:---:|:---:|:-:|
# |0 |0 |0 |00 |0 |
# |0 |0 |0 |00 |0 |
# |0 |0 |1 |01 |1 |
# |1 |0 |1 |11 |1 |
# |0 |1 |0 |00 |2 |
# |1 |1 |0 |10 |2 |
# |1 |1 |1 |11 |3 |
# |1 |1 |1 |11 |3 |
# **How to determine which attribute is more informative?**
# First, create a uniform distribution over the instances in $D$.
# Empirical distribution: one equally-weighted outcome per row of D, with
# components (X1, X2, X3, X4, Y) taken from the table above.
d = dit.uniform([('0','0','0','00','0'),
                 ('0','0','0','00','0'),
                 ('0','0','1','01','1'),
                 ('1','0','1','11','1'),
                 ('0','1','0','00','2'),
                 ('1','1','0','10','2'),
                 ('1','1','1','11','3'),
                 ('1','1','1','11','3')])
# Name the random variables so they can be addressed by name in the
# entropy / mutual-information queries below.
d.set_rv_names(('X1','X2','X3','X4','Y'))
# Bare expression: displays the distribution in the notebook output.
d
# We can then calculate $\text{Info}(D)$ and $\text{Info}_{X_i}(D)$ for $i=\{1,2,3,4\}$ as the entropy $H(Y)$ and conditional entropies $H(Y|X_i)$'s respectively.
# +
# Info(D) = H(Y): entropy of the class label over the 8 instances.
InfoD = entropy(d,['Y'])
# Info_{Xi}(D) = H(Y|Xi): remaining label entropy after splitting on Xi.
InfoX1D = conditional_entropy(d,['Y'],['X1'])
InfoX2D = conditional_entropy(d,['Y'],['X2'])
InfoX3D = conditional_entropy(d,['Y'],['X3'])
InfoX4D = conditional_entropy(d,['Y'],['X4'])
# Render the values as LaTeX; braces are doubled because the template is
# passed through str.format.
Math(r'''
\begin{{aligned}}
\text{{Info}}(D)&={}\\
\text{{Info}}_{{X_1}}(D)&={:.3g}\\
\text{{Info}}_{{X_2}}(D)&={:.3g}\\
\text{{Info}}_{{X_3}}(D)&={:.3g}\\
\text{{Info}}_{{X_4}}(D)&={:.3g}\\
\end{{aligned}}
'''.format(InfoD,InfoX1D,InfoX2D,InfoX3D,InfoX4D))
# -
# The information gain $\text{Gain}_{X_i}(D)$ can be calculated as the mutual information $I(X_i;Y):=H(Y)-H(Y|X_i)$.
# +
# Gain_{Xi}(D) = I(Xi;Y) = H(Y) - H(Y|Xi): the reduction in label entropy
# obtained by splitting on attribute Xi.
GainX1D = mutual_information(d,['X1'],['Y'])
GainX2D = mutual_information(d,['X2'],['Y'])
GainX3D = mutual_information(d,['X3'],['Y'])
GainX4D = mutual_information(d,['X4'],['Y'])
# Render the values as LaTeX; doubled braces escape str.format.
Math(r'''
\begin{{aligned}}
\text{{Gain}}_{{X_1}}(D)&={:.3g}\\
\text{{Gain}}_{{X_2}}(D)&={:.3g}\\
\text{{Gain}}_{{X_3}}(D)&={:.3g}\\
\text{{Gain}}_{{X_4}}(D)&={:.3g}\\
\end{{aligned}}
'''.format(GainX1D,GainX2D,GainX3D,GainX4D))
# -
# **Exercise** Which attribute gives the highest information gain? Should we choose it as the splitting attribute?
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "2e636a548f742243280ce43d03d91000", "grade": true, "grade_id": "infogain", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
# YOUR ANSWER HERE
# -
# ## Information Gain Ratio
# To normalize information gain properly, we first calculate $\text{SplitInfo}_{X_i}(D)$ as $H(X_i)$:
# +
# SplitInfo_{Xi}(D) = H(Xi): entropy of the attribute itself, used below to
# normalize the information gain (penalizes many-valued attributes like X4).
SplitInfoX1D = entropy(d,['X1'])
SplitInfoX2D = entropy(d,['X2'])
SplitInfoX3D = entropy(d,['X3'])
SplitInfoX4D = entropy(d,['X4'])
# Render the values as LaTeX; doubled braces escape str.format.
Math(r'''
\begin{{aligned}}
\text{{SplitInfo}}_{{X_1}}(D)&={:.3g}\\
\text{{SplitInfo}}_{{X_2}}(D)&={:.3g}\\
\text{{SplitInfo}}_{{X_3}}(D)&={:.3g}\\
\text{{SplitInfo}}_{{X_4}}(D)&={:.3g}\\
\end{{aligned}}
'''.format(SplitInfoX1D,SplitInfoX2D,SplitInfoX3D,SplitInfoX4D))
# -
# Finally, to calculate the information gain ratios:
# Information gain ratio = Gain_{Xi}(D) / SplitInfo_{Xi}(D): the normalized
# gain described in the markdown above. Doubled braces escape str.format.
Math(r'''
\begin{{aligned}}
\frac{{\text{{Gain}}_{{X_1}}(D)}}{{\text{{SplitInfo}}_{{X_1}}(D)}}&={:.3g}\\
\frac{{\text{{Gain}}_{{X_2}}(D)}}{{\text{{SplitInfo}}_{{X_2}}(D)}}&={:.3g}\\
\frac{{\text{{Gain}}_{{X_3}}(D)}}{{\text{{SplitInfo}}_{{X_3}}(D)}}&={:.3g}\\
\frac{{\text{{Gain}}_{{X_4}}(D)}}{{\text{{SplitInfo}}_{{X_4}}(D)}}&={:.3g}\\
\end{{aligned}}
'''.format(GainX1D/SplitInfoX1D,GainX2D/SplitInfoX2D,GainX3D/SplitInfoX3D,GainX4D/SplitInfoX4D))
# **Exercise** Is $X_4$ a good splitting attribute? Why?
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "8e9d188661e23c703a0fd1cff8f3bd33", "grade": true, "grade_id": "ratio", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
# YOUR ANSWER HERE
| Tutorial3/Information Quantities for Decision Tree Induction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Navigating Files and Directories
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Overview:
#
# - **Teaching:** 20 min
# - **Exercises:** 10 min
#
# **Questions**
# - How can I move around on my computer?
# - How can I see what files and directories I have?
# - How can I specify the location of a file or directory on my computer?
#
# **Objectives**
# - Explain the similarities and differences between a file and a directory.
# - Translate an absolute path into a relative path and vice versa.
# - Construct absolute and relative paths that identify specific files and directories.
# - Explain the steps in the shell’s read-run-print cycle.
# - Identify the actual command, flags, and filenames in a command-line call.
# - Demonstrate the use of tab completion, and explain its advantages.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Shell Commands
#
# In order to generate the material for this lesson, we make use of Jupyter Notebooks. More on them later ...
#
# Practically, this means we have to tell the notebook that we want to execute a shell or **bash** command. We do this with a line which begins:
#
# `%%bash2`
#
# You will not need to enter this command as you are already running bash in your terminal. So whenever you are instructed to type a command ignore this line.
# + [markdown] slideshow={"slide_type": "slide"}
# ## whoami
#
# When you log on you will see a **prompt** that looks something like:
#
# ```bash
# nbuser@nbserver:~$
# ```
#
# On some systems the first part of this will be your username, but we are running in the notebook's terminal, which has created a special user for us **nbuser**, and the terminal session is running on a server called **nbserver**. Type the command `whoami`, then press the Enter key (sometimes marked Return) to send the command to the shell. The command’s output is the ID of the current user, i.e., it shows us who the shell thinks we are:
# + slideshow={"slide_type": "slide"}
# %%bash2 --dir ~
whoami
# + [markdown] slideshow={"slide_type": "slide"}
# More specifically, when we type whoami the shell:
#
# 1. finds a program called whoami,
# 2. runs that program,
# 3. displays that program’s output, then
# 4. displays a new prompt to tell us that it’s ready for more commands.
# + [markdown] slideshow={"slide_type": "slide"}
# In this lesson, we have used the username `nbuser` or `nelle` (associated with our hypothetical scientist Nelle) in example input and output throughout.
# However, when you type this lesson’s commands on your computer or another system, you could see and use something different, namely, the username associated with the user account on your computer. This username will be the output from `whoami`. On other systems, in what follows, `nbuser` or `nelle` should always be replaced by that username.
# + [markdown] slideshow={"slide_type": "slide"}
# ## The linux file system
#
# The part of the operating system responsible for managing files and directories is called the **file system**. It organizes our data into files, which hold information, and directories (also called "folders"), which hold files or other directories.
#
# Several commands are frequently used to create, inspect, rename, and delete files and directories. To start exploring them, we'll go to our open shell window.
# + [markdown] slideshow={"slide_type": "slide"}
# First let's find out where we are by running a command called `pwd` (which stands for "print working directory"). Directories are like places - at any time while we are using the shell we are in exactly one place, called our current working directory. Commands mostly read and write files in the **current(present) working directory**, i.e. "here", so knowing where you are before running a command is important. `pwd` shows you where you are:
# + slideshow={"slide_type": "fragment"}
# %%bash2
pwd
# + [markdown] slideshow={"slide_type": "fragment"}
# Here, the computer's response is as above, which is the `data-shell` directory we extracted on the schedule page.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Home Directory Variation
# The home directory path will look different on different operating systems. On Linux it may look like `/home/nelle`, and on Windows it will be similar to `C:\Documents and Settings\nelle` or `C:\Users\nelle`.
#
# (Note that it may look slightly different for different versions of Windows.) In all of our examples, we've used jupyter notebooks output as the default - Linux, Windows and Mac output will differ.
#
# Jupyter sets the working directory to the folder in which the notebook is stored; we will often override this using magics, as discussed in the setup.
# + [markdown] slideshow={"slide_type": "subslide"}
# To understand what a "home directory" is, let's have a look at how the file system as a whole is organized. For the sake of this example, we'll be illustrating the filesystem on our scientist Nelle's computer. After this illustration, you'll be learning commands to explore your own filesystem, which will be constructed in a similar way, but not be exactly identical.
# + [markdown] slideshow={"slide_type": "subslide"}
# On Nelle's computer, the filesystem looks like this:
#
# 
#
# At the top is the root directory that holds everything else. We refer to it using a slash character, `/`, on its own; this is the leading slash in `/Users/nelle`.
# + [markdown] slideshow={"slide_type": "subslide"}
# Inside that directory are several other directories: `bin` (which is where some built-in programs are stored), `data` (for miscellaneous data files), `Users` (where users' personal directories are located), `tmp` (for temporary files that don't need to be stored long-term), and so on.
#
# We know that our current working directory `/Users/nelle` is stored inside `/Users` because `/Users` is the first part of its name. Similarly, we know that `/Users` is stored inside the root directory `/` because its name begins with `/`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Slashes
# Notice that there are two meanings for the `/` character. When it appears at the front of a file or directory name, it refers to the root directory. When it appears *inside* a name, it's just a separator.
# + [markdown] slideshow={"slide_type": "subslide"}
# Underneath `/Users`, we find one directory for each user with an account on Nelle's machine, her colleagues the Mummy and Wolfman.
#
# 
#
# The Mummy's files are stored in `/Users/imhotep`, Wolfman's in `/Users/larry`, and Nelle's in `Users/nelle`. Because Nelle is the user in our examples here, this is why we get `/Users/nelle` as our home directory.
# Typically, when you open a new command prompt you will be in your home directory to start.
#
# Now let's learn the command that will let us see the contents of our own filesystem. We can see what's in the `data-shell` directory by running `ls`, which stands for "listing":
# + slideshow={"slide_type": "subslide"}
# %%bash2
# ls
# + [markdown] slideshow={"slide_type": "subslide"}
# `ls` prints the names of the files and directories in the current directory. We can make its output more comprehensible by using the **flag** `-F` (also known as a **switch** or an **option**) , which tells `ls` to add a marker to file and directory names to indicate what they are. A trailing `/` indicates that this is a directory. Depending on your settings, it might also use colors to indicate whether each entry is a file or directory.
# + slideshow={"slide_type": "subslide"}
# %%bash2
# ls -F
# + [markdown] slideshow={"slide_type": "slide"}
# ## Getting help
#
# `ls` has lots of other flags. There are two common ways to find out how to use a command and what flags it accepts:
#
# 1. We can pass a `--help` flag to the command, such as: `ls --help`
# 2. We can read its manual with man, such as: `man ls`
#
# **Depending on your environment you might find that only one of these works (either `man` or `--help`)**. We'll describe both ways below.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### The `--help` flag
#
# Many bash commands, and programs that people have written that can be run from within bash, support a `--help` flag to display more information on how to use the command or program.
# + slideshow={"slide_type": "subslide"}
# %%bash2
# ls --help
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Unsupported command-line options
#
# If you try to use an option (flag) that is not supported, `ls` and other programs will usually print an error message similar to:
# ```bash
# # # ls -j
# ls: invalid option -- 'j'
# Try 'ls --help' for more information.
# ```
# .
# + slideshow={"slide_type": "subslide"}
# %%bash2
# ls -j
# + [markdown] slideshow={"slide_type": "slide"}
# ### The `man` command
# The other way to learn about ls is to type
# + slideshow={"slide_type": "subslide"}
# %%bash2
man ls
# + [markdown] slideshow={"slide_type": "subslide"}
# This will turn your terminal into a page with a description of the `ls` command and its options and, if you're lucky, some examples of how to use it.
#
# To navigate through the `man` pages, you may use `↑` and `↓` to move line-by-line, or try `B` and `Spacebar` to skip up and down by a full page. To search for a character or word in the man pages, use `/` followed by the character or word you are searching for.
#
# To **quit** the `man` pages, press `Q`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Manual pages on the web
# Of course there is a third way to access help for commands: searching the internet via your web browser. When using internet search, including the phrase `unix man page` in your search query will help to find relevant results.
#
# GNU provides links to its [manuals](http://www.gnu.org/manual/manual.html) including the [core GNU utilities](http://www.gnu.org/software/coreutils/manual/coreutils.html), which covers many commands introduced within this lesson.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: Exploring More `ls` Flags
# What does the command `ls` do when used with the `-l` and `-h` flags?
#
# Some of its output is about properties that we do not cover in this lesson (such as file permissions and ownership), but the rest should be useful nevertheless.
#
# [Solution]()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Solution: Exploring More ls Flags
# The `-l` flag makes `ls` use a long listing format, showing not only the file/directory names but also additional information such as the file size and the time of its last modification. The `-h` flag makes the file size "human readable", i.e. display something like `5.3K` instead of `5369`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: Listing Recursively and By Time
# The command `ls -R` lists the contents of directories recursively, i.e., lists their sub-directories, sub-sub-directories, and so on at each level. The command `ls -t` lists things by time of last change, with most recently changed files or directories first. In what order does `ls -R -t` display things? Hint: `ls -l` uses a long listing format to view timestamps.
#
# [Solution]()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Solution: Listing Recursively and By Time
# The files/directories in each directory are sorted by time of last change.
# + [markdown] slideshow={"slide_type": "slide"}
# ## The data
#
# For the rest of this lesson we will make use of a collection of files that are in a zip file that we copied to our home directory in the setup.
# + slideshow={"slide_type": "subslide"}
# %%bash2
# ls -l intro-linux
# + [markdown] slideshow={"slide_type": "slide"}
# We need change our location to a different directory, so we are no longer located in our home directory.
#
# The command to change locations is `cd` followed by a directory name to change our working directory. `cd` stands for “change directory”, which is a bit misleading: the command doesn’t change the directory, it changes the shell’s idea of what directory we are in.
#
# Let’s say we want to move to the directory we just created. We can use the following series of commands to get there:
# + slideshow={"slide_type": "slide"}
# %%bash2
# cd intro-linux
# + [markdown] slideshow={"slide_type": "slide"}
# Now our directory contains a zip file, `data-shell.zip`, (how can you check this?) which we need to unzip with the command:
# + slideshow={"slide_type": "subslide"}
# %%bash2
unzip data-shell.zip
# + [markdown] slideshow={"slide_type": "slide"}
# Now we can check the contents of the zip by running `ls`:
# + slideshow={"slide_type": "slide"}
# %%bash2
# ls
# + [markdown] slideshow={"slide_type": "slide"}
# Unzipping the file has created a new directory `data-shell`. As before we can check the contents of this with:
# + slideshow={"slide_type": "slide"}
# %%bash2
# ls -F data-shell
# + [markdown] slideshow={"slide_type": "slide"}
# Here, we can see that our `data-shell` directory contains mostly sub-directories. Any names in your output that don't have trailing slashes, are plain old files. And note that there is a space between `ls` and `-F`: without it, the shell thinks we're trying to run a command called `ls-F`, which doesn't exist.
# + [markdown] slideshow={"slide_type": "subslide"}
# We can also use ls to see the contents of a different directory. Let's take a look at our `data` directory by running `ls -F data`, i.e., the command `ls` with the `-F` flag and the argument `data`. The argument `data` tells `ls` that we want a listing of something other than our current working directory:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# ls -F data-shell
# -
# ## Information: Parameters and Arguments
#
# According to Wikipedia, the terms argument and parameter mean slightly different things. In practice, however, most people use them interchangeably to refer to the input term(s) given to a command.
#
# `ls` is the command, `-lh` are the flags (also called options), and `data-shell` is the argument.
# + [markdown] slideshow={"slide_type": "subslide"}
#
# Your output should be a list of all the files in the `data-shell` directory. This is the directory that you downloaded and extracted at the start of the lesson. If the directory does not exist, check that you have downloaded it to the correct folder.
#
# As you may now see, using a bash shell is strongly dependent on the idea that your files are organized in a hierarchical file system. Organizing things hierarchically in this way helps us keep track of our work: it’s possible to put hundreds of files in our home directory, just as it’s possible to pile hundreds of printed papers on our desk, but it’s a self-defeating strategy.
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's say we want to move to the `data` directory we saw above. We can use the following commands to get there:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd data-shell
# cd data
# + [markdown] slideshow={"slide_type": "subslide"}
# These commands will move us from our home directory to the `data-shell` directory and then into the `data` directory. `cd` doesn't print anything, but if we run `pwd` after it, we can see that we are now in `/Users/nelle/Desktop/data-shell/data`. If we run ls without arguments now, it lists the contents of `/Users/nelle/Desktop/data-shell/data`, because that's where we now are:
# + slideshow={"slide_type": "fragment"}
# %%bash2
pwd
# + slideshow={"slide_type": "subslide"}
# %%bash2
# ls -F
# + [markdown] slideshow={"slide_type": "slide"}
# We can do this again and change directory into the `elements` directory and use `pwd` to verify that we have changed into another directory.
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd elements
pwd
# + [markdown] slideshow={"slide_type": "slide"}
# We now know how to go down the directory tree, but how do we go up? We might try the following:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd data
# + [markdown] slideshow={"slide_type": "subslide"}
# But we get an error! Why is this?
#
# With our methods so far, `cd` can only see sub-directories inside your current directory. There are different ways to see directories above your current location; we'll start with the simplest.
# + [markdown] slideshow={"slide_type": "subslide"}
# There is a shortcut in the shell to move up one directory level that looks like this:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd ..
# + [markdown] slideshow={"slide_type": "subslide"}
# `..` is a special directory name meaning "the directory containing this one", or more succinctly, the parent of the current directory. Sure enough, if we run `pwd` after running `cd ..`, we're back in `/Users/nelle/Desktop/data-shell/data`:
# + slideshow={"slide_type": "fragment"}
# %%bash2
pwd
# + [markdown] slideshow={"slide_type": "subslide"}
# The special directory `..` doesn't usually show up when we run `ls`. If we want to display it, we can give `ls` the `-a` flag:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# ls -F -a
# + [markdown] slideshow={"slide_type": "subslide"}
# `-a` stands for "show all"; it forces ls to show us file and directory names that begin with `.`, such as `..` (which, if we're in `/Users/nelle`, refers to the `/Users` directory) As you can see, it also displays another special directory that's just called `.`, which means "the current working directory". It may seem redundant to have a name for it, but we'll see some uses for it soon.
#
# Note that in most command line tools, multiple flags can be combined with a single `-` and no spaces between the flags: `ls -F -a` is equivalent to `ls -Fa`.
# + [markdown] slideshow={"slide_type": "subslide"}
# We can get back to the `data-shell` directory the same way:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd ..
pwd
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Other Hidden Files
# In addition to the hidden directories `..` and `.`, you may also see a file called `.bash_profile`. This file usually contains shell configuration settings. You may also see other files and directories beginning with `.`. These are usually files and directories that are used to configure different programs on your computer. The prefix `.` is used to prevent these configuration files from cluttering the terminal when a standard `ls` command is used.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Orthogonality
# The special names `.` and `..` don't belong to `cd`; they are interpreted the same way by every program. For example, if we are in `/Users/nelle/data`, the command `ls ..` will give us a listing of `/Users/nelle`. When the meanings of the parts are the same no matter how they're combined, programmers say they are **orthogonal**: Orthogonal systems tend to be easier for people to learn because there are fewer special cases and exceptions to keep track of.
# + [markdown] slideshow={"slide_type": "slide"}
# Let's try returning to the `elements` directory in the `data` direcotry, which we saw above. Last time, we used two commands, but we can actually string together the list of directories to move to `elements` in one step:
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd data/elements
# + [markdown] slideshow={"slide_type": "subslide"}
# Check that we've moved to the right place by running `pwd` and `ls -F`
#
# If we want to move up one level from the data directory, we could use `cd ..`. But there is another way to move to any directory, regardless of your current location.
# + [markdown] slideshow={"slide_type": "subslide"}
# So far, when specifying directory names, or even a directory path (as above), we have been using **relative paths**. When you use a relative path with a command like `ls` or `cd`, it tries to find that location from where we are, rather than from the root of the file system.
# + [markdown] slideshow={"slide_type": "subslide"}
# However, it is possible to specify the **absolute path** to a directory by including its entire path from the root directory, which is indicated by a leading slash. The leading `/` tells the computer to follow the path from the root of the file system, so it always refers to exactly one directory, no matter where we are when we run the command.
# + [markdown] slideshow={"slide_type": "subslide"}
# This allows us to move to our `data-shell` directory from anywhere on the filesystem (including from inside `data`). To find the absolute path we're looking for, we can use `pwd` and then extract the piece we need to move to `data-shell`.
# + slideshow={"slide_type": "fragment"}
# %%bash2
pwd
# + [markdown] slideshow={"slide_type": "subslide"}
# ```bash
# # # %%bash2
# # # cd /home/nbuser/library/data/data-shell/
# ```
# Don't execute this command, as the absolute path may be different for you! Instead select the output from your `pwd` command up to and including `data-shell`.
# + [markdown] slideshow={"slide_type": "subslide"}
# Run `pwd` and `ls -F` to ensure that we're in the directory we expect.
#
# These then, are the basic commands for navigating the filesystem on your computer: `pwd`, `ls` and `cd`. Let's explore some variations on those commands.
# + [markdown] slideshow={"slide_type": "slide"}
# What happens if you type `cd` on its own, without giving a directory?
# + slideshow={"slide_type": "fragment"}
# %%bash2
# cd
# + [markdown] slideshow={"slide_type": "fragment"}
# How can you check what happened? `pwd` gives us the answer!
# + slideshow={"slide_type": "fragment"}
# %%bash2
pwd
# + [markdown] slideshow={"slide_type": "subslide"}
# It turns out that `cd` without an argument will return you to your home directory, which is great if you've gotten lost in your own filesystem.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Two More Shortcuts
# The shell interprets the character `~` (tilde) at the start of a path to mean "the current user's home directory". For example, if Nelle's home directory is `/Users/nelle`, then `~/data` is equivalent to `/Users/nelle/data`. This only works if it is the first character in the path: `here/there/~/elsewhere` is not `here/there/Users/nelle/elsewhere`.
#
# Another shortcut is the `-` (dash) character. `cd` will translate `-` into the previous directory I was in, which is faster than having to remember, then type, the full path. This is a very efficient way of moving back and forth between directories. The difference between `cd ..` and `cd -` is that the former brings you up, while the latter brings you back. You can think of it as the Last Channel button on a TV remote.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: Absolute vs Relative Paths
# Starting from `/Users/amanda/data/`, which of the following commands could Amanda use to navigate to her home directory, which is `/Users/amanda`?
# 1. `cd .`
# 2. `cd /`
# 3. `cd /home/amanda`
# 4. `cd ../..`
# 5. `cd ~`
# 6. `cd home`
# 7. `cd ~/data/..`
# 8. `cd`
# 9. `cd ..`
#
# [Solution]()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Solution: Absolute vs Relative Paths
# 1. No: `.` stands for the current directory.
# 2. No: `/` stands for the root directory.
# 3. No: Amanda's home directory is `/Users/amanda`.
# 4. No: this goes up two levels, i.e. ends in `/Users`.
# 5. Yes: `~` stands for the user's home directory, in this case `/Users/amanda`.
# 6. No: this would navigate into a directory home in the current directory if it exists.
# 7. Yes: unnecessarily complicated, but correct.
# 8. Yes: shortcut to go back to the user's home directory.
# 9. Yes: goes up one level.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: Relative Path Resolution
# Using the filesystem diagram below, if pwd displays `/Users/thing`, what will `ls -F ../backup` display?
# 1. `../backup: No such file or directory`
# 2. `2012-12-01 2013-01-08 2013-01-27`
# 3. `2012-12-01/ 2013-01-08/ 2013-01-27/`
# 4. `original/ pnas_final/ pnas_sub/`
# 
#
# [Solution]()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Solution: Relative Path Resolution
# 1. No: there is a directory backup in `/Users`.
# 2. No: this is the content of `Users/thing/backup`, but with `..` we asked for one level further up.
# 3. No: see previous explanation.
# 4. Yes: `../backup/` refers to `/Users/backup/`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: `ls` Reading Comprehension
# Assuming a directory structure as in the above Figure (File System for Challenge Questions), if `pwd` displays `/Users/backup`, and `-r` tells ls to display things in reverse order, what command will display:
# ```bash
# pnas_sub/ pnas_final/ original/
# ```
# 1. `ls pwd`
# 2. `ls -r -F`
# 3. `ls -r -F /Users/backup`
# 4. Either #2 or #3 above, but not #1.
#
# [Solution]()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Solution: `ls` Reading Comprehension
# 1. No: `pwd` is not the name of a directory.
# 2. Yes: `ls` without directory argument lists files and directories in the current directory.
# 3. Yes: uses the absolute path explicitly.
# 4. Correct: see explanations above.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Nelle's Pipeline: Organizing Files
#
# Knowing just this much about files and directories, Nelle is ready to organize the files that the protein assay machine will create. First, she creates a directory called `north-pacific-gyre` (to remind herself where the data came from). Inside that, she creates a directory called `2012-07-03`, which is the date she started processing the samples. She used to use names like `conference-paper` and `revised-results`, but she found them hard to understand after a couple of years. (The final straw was when she found herself creating a directory called `revised-revised-results-3`.)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: Sorting Output
# Nelle names her directories "year-month-day", with leading zeroes for months and days, because the shell displays file and directory names in alphabetical order. If she used month names, December would come before July; if she didn't use leading zeroes, November ('11') would come before July ('7'). Similarly, putting the year first means that June 2012 will come before June 2013.
# + [markdown] slideshow={"slide_type": "slide"}
# Each of her physical samples is labelled according to her lab's convention with a unique ten-character ID, such as "NENE01729A". This is what she used in her collection log to record the location, time, depth, and other characteristics of the sample, so she decides to use it as part of each data file's name. Since the assay machine's output is plain text, she will call her files `NENE01729A.txt`, `NENE01812A.txt`, and so on. All 1520 files will go into the same directory.
# + [markdown] slideshow={"slide_type": "subslide"}
# Now in her current directory data-shell, Nelle can see what files she has using the command:
#
# `ls north-pacific-gyre/2012-07-03/`
#
# This is a lot to type, but she can let the shell do most of the work through what is called **tab completion**. If she types:
#
# `ls nor`
#
# and then presses tab (the tab key on her keyboard), the shell automatically completes the directory name for her:
#
# `ls north-pacific-gyre/`
#
# If she presses tab again, Bash will add `2012-07-03/` to the command, since it's the only possible completion. Pressing tab again does nothing, since there are 19 possibilities; pressing tab twice brings up a list of all the files, and so on. This is called **tab completion**, and we will see it in many other tools as we go on.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information: linux.bath and the ‘H-drive’
#
# Your results may be slightly different depending on how you have customized your filesystem.
# From the Windows taskbar open a File explorer window and open the ‘H-drive’ H: What do you notice about the output of ls and the contents of H:
# Also compare the output of ls dos and the contents of your Documents folder in Windows File Explorer.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Key Points:
#
# - The file system is responsible for managing information on the disk.
# - Information is stored in files, which are stored in directories (folders).
# - Directories can also store other directories, which forms a directory tree.
# - `cd path` changes the current working directory.
# - `ls path` prints a listing of a specific file or directory; `ls` on its own lists the current working directory.
# - `pwd` prints the user's current working directory.
# - `/` on its own is the root directory of the whole file system.
# - A relative path specifies a location starting from the current location.
# - An absolute path specifies a location from the root of the file system.
# - Directory names in a path are separated with `/` on Unix, but `\` on Windows.
# - `..` means 'the directory above the current one'; `.` on its own means 'the current directory'.
# - Most files' names are something.extension. The extension isn't required, and doesn't guarantee anything, but is normally used to indicate the type of data in the file.
| nbplain/03_navigatingFD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.010148, "end_time": "2020-04-08T03:41:31.983918", "exception": false, "start_time": "2020-04-08T03:41:31.973770", "status": "completed"} tags=[]
# # COVID-19 Status in Australia
# > Tracking new, confirmed, and recovered cases and deaths by state.
#
# - comments: true
# - author: IamAshKS
# - categories: [overview, interactive, australia]
# - hide: true
# - permalink: /covid-overview-australia/
# + papermill={"duration": 0.392422, "end_time": "2020-04-08T03:41:32.382598", "exception": false, "start_time": "2020-04-08T03:41:31.990176", "status": "completed"} pycharm={"is_executing": false, "name": "#%%\n"} tags=[]
#hide
from IPython.display import HTML
from pathlib import Path
import jinja2 as jj
import numpy as np
import pandas as pd
import requests as rq
# + papermill={"duration": 0.017593, "end_time": "2020-04-08T03:41:32.406055", "exception": false, "start_time": "2020-04-08T03:41:32.388462", "status": "completed"} pycharm={"is_executing": false, "name": "#%%\n"} tags=[]
#hide
def do_dev_tasks(html):
    """Development helper: mirror the rendered page to ``.local/index.html``.

    The write happens only when a ``.local`` directory already exists, so the
    call is a no-op outside a local development checkout.
    """
    dev_dir = Path('.local')
    if dev_dir.is_dir():
        (dev_dir / 'index.html').write_text(html)
def get_dataframe(name):
    """Fetch a JHU CSSE global time-series as a DataFrame.

    ``name`` selects the series: 'confirmed', 'deaths' or 'recovered'.
    """
    base = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
            'csse_covid_19_time_series/')
    return pd.read_csv(f'{base}time_series_covid19_{name}_global.csv')
def get_css_asset(name):
    """Return an HTML snippet loading the named CSS asset.

    A local file under ``assets/css`` is referenced with a relative
    ``<link>``; otherwise the stylesheet is fetched from GitHub and
    inlined inside a ``<style>`` tag.
    """
    remote_url = f'https://raw.githubusercontent.com/iamashks/covid19-australia/master/assets/css/{name}.css'
    local_path = f'assets/css/{name}.css'
    if Path(local_path).is_file():
        return f'<link rel="stylesheet" type="text/css" href="../{local_path}" />\n'
    return f'<style>{rq.get(remote_url).text}</style>'
def get_template(name):
    """Load the named jinja2 HTML template.

    Uses the local ``templates/`` copy when present; otherwise the template
    text is fetched from GitHub and compiled directly.
    """
    remote_url = f'https://raw.githubusercontent.com/iamashks/covid19-australia/master/templates/{name}.html'
    local_path = f'templates/{name}.html'
    if not Path(local_path).is_file():
        return jj.Template(rq.get(remote_url).text)
    env = jj.Environment(loader=jj.FileSystemLoader(searchpath='./'))
    return env.get_template(local_path)
# + papermill={"duration": 0.262797, "end_time": "2020-04-08T03:41:32.674360", "exception": false, "start_time": "2020-04-08T03:41:32.411563", "status": "completed"} pycharm={"is_executing": false, "name": "#%%\n"} tags=[]
#hide
# Column names used by the JHU CSSE time-series layout, plus the country of
# interest.  Each of the three frames is filtered down to Australian rows.
COL_COUNTRY = 'Country/Region'
COL_STATE = 'Province/State'
COUNTRY = 'Australia'
dft_confirm = get_dataframe('confirmed')
dft_confirm = dft_confirm[dft_confirm[COL_COUNTRY] == COUNTRY]
dft_demised = get_dataframe('deaths')
dft_demised = dft_demised[dft_demised[COL_COUNTRY] == COUNTRY]
dft_recover = get_dataframe('recovered')
dft_recover = dft_recover[dft_recover[COL_COUNTRY] == COUNTRY]
# + papermill={"duration": 0.056274, "end_time": "2020-04-08T03:41:32.736942", "exception": false, "start_time": "2020-04-08T03:41:32.680668", "status": "completed"} pycharm={"is_executing": false, "name": "#%%\n"} tags=[]
#hide
# The time-series frames have one column per day with the newest date last,
# so negative offsets select "today", 1, 5 and 50 days ago.
COL_TODAY = dft_confirm.columns[-1]
COL_1DAY = dft_confirm.columns[-1 - 1]
COL_5DAY = dft_confirm.columns[-1 - 5]
COL_50DAY = dft_confirm.columns[-1 - 50]
# Per-state summary: current totals plus 5-day and 1-day deltas.
df_table = pd.DataFrame({'State': dft_confirm[COL_STATE], 'Cases': dft_confirm[COL_TODAY],
                         'Recover': dft_recover[COL_TODAY], 'Deaths': dft_demised[COL_TODAY]})
df_table['Cases (5D)'] = np.array(dft_confirm[COL_TODAY]) - np.array(dft_confirm[COL_5DAY])
df_table['Recover (5D)'] = np.array(dft_recover[COL_TODAY]) - np.array(dft_recover[COL_5DAY])
df_table['Deaths (5D)'] = np.array(dft_demised[COL_TODAY]) - np.array(dft_demised[COL_5DAY])
df_table['Cases (1D)'] = np.array(dft_confirm[COL_TODAY]) - np.array(dft_confirm[COL_1DAY])
df_table['Recover (1D)'] = np.array(dft_recover[COL_TODAY]) - np.array(dft_recover[COL_1DAY])
df_table['Deaths (1D)'] = np.array(dft_demised[COL_TODAY]) - np.array(dft_demised[COL_1DAY])
# Case-fatality rate as a percentage, rounded to one decimal place.
df_table['Fatality Rate'] = (100 * df_table['Deaths'] / df_table['Cases']).round(1)
# Worst-hit states first; rebuild the index as a 1-based rank.
df_table = df_table.sort_values(by=['Cases', 'Deaths'], ascending=[False, False])
df_table = df_table.reset_index()
df_table.index += 1
del df_table['index'] # del duplicate index
df_table.head(8)
# + papermill={"duration": 0.035454, "end_time": "2020-04-08T03:41:32.778893", "exception": false, "start_time": "2020-04-08T03:41:32.743439", "status": "completed"} pycharm={"is_executing": false, "name": "#%%\n"} tags=[]
#hide
# Daily new cases per state: drop the metadata columns, then diff the daily
# cumulative totals along the date axis.
dt_cols = dft_confirm.columns[~dft_confirm.columns.isin([COL_STATE, COL_COUNTRY, 'Lat', 'Long'])]
dft_cases = dft_confirm.groupby(COL_STATE)[dt_cols].sum()
dft_cases_new = dft_cases.diff(axis=1).fillna(0).astype(int)
# Headline numbers for the template: national totals plus per-state splits
# for New South Wales, Victoria and Queensland.
include_cols = ['Cases', 'Recover', 'Deaths', 'Cases (5D)', 'Recover (5D)', 'Deaths (5D)']
summary_nsw = df_table[df_table['State'].eq('New South Wales')][include_cols].sum().add_prefix('NSW ')
summary_vic = df_table[df_table['State'].eq('Victoria')][include_cols].sum().add_prefix('VIC ')
summary_qld = df_table[df_table['State'].eq('Queensland')][include_cols].sum().add_prefix('QLD ')
summary_time = {'updated': pd.to_datetime(COL_TODAY), 'since': pd.to_datetime(COL_5DAY)}
summary = {**summary_time, **df_table[include_cols].sum(), **summary_nsw, **summary_vic, **summary_qld}
summary
# + papermill={"duration": 0.302402, "end_time": "2020-04-08T03:41:33.088109", "exception": false, "start_time": "2020-04-08T03:41:32.785707", "status": "completed"} pycharm={"is_executing": false, "name": "#%%\n"} tags=[]
#hide_input
# Render the jinja template with the summary numbers and tables, inline the
# CSS asset, optionally mirror the page to .local/ for development, and
# display the result inline in the notebook.
html_text = get_template('overview').render(D=summary, table=df_table, newcases=dft_cases_new,
                                            np=np, pd=pd, enumerate=enumerate)
html_text = f'<div>{get_css_asset("keen")}{html_text}</div>'
do_dev_tasks(html=html_text)
HTML(html_text)
# + [markdown] papermill={"duration": 0.008899, "end_time": "2020-04-08T03:41:33.106625", "exception": false, "start_time": "2020-04-08T03:41:33.097726", "status": "completed"} tags=[]
# Visualizations by [<NAME> (Ash)](https://ashks.com/)[^1][^2].
#
# [^1]: Notebook: [Original](https://go.aksingh.net/covid-19-au) and [Source](https://github.com/iamashks/covid19-australia/)
# [^2]: Data Source: [2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE](https://github.com/CSSEGISandData/COVID-19)
| _notebooks/2020-03-30-covid19-overview-australia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Milestone 5
#
# ### Name : <NAME>
# ### Course : DSC540
# ### Instructor : <NAME>
# ### Date : 01 June 2021
# #### Merging the Data and Storing in a Database/Visualizing Data
#
# ### Source
# #### source 1. https://www.kaggle.com/josephassaker/covid19-global-dataset
# #### source 2. https://corona.lmao.ninja/v2/continents?yesterday=true&sort=
# #### source 3 . https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/latest/owid-covid-latest.json
## Setting up environment
import os
# Display the current working directory (notebook cell output).
os.getcwd()
import sys
sys.path.append('../data')
## loading required libraries/modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sqlite3 as sql
# %matplotlib inline
## source 1. https://www.kaggle.com/josephassaker/covid19-global-dataset
# Load the country-level summary (CSV source) and align its schema with the
# other sources: 'continent' -> 'region', plus country-name normalization.
df_csv = pd.read_csv("data/worldometer_coronavirus_summary_data.csv")
df_csv.rename(columns = {'continent':'region'}, inplace = True)
# BUG FIX: the original also called df_csv.replace('Czech Republic', 'Czechia')
# without assigning the result; DataFrame.replace returns a new frame, so that
# call was a silent no-op and has been removed.  The column-level replacement
# below is the one that actually takes effect.
df_csv['country'] = df_csv['country'].replace(['Czech Republic'],'Czechia')
df_csv
## source 2. https://www.kaggle.com/josephassaker/covid19-global-dataset
# NOTE(review): the URL above looks copy-pasted from source 1; per the summary
# at the end of this notebook, source 2 is the corona.lmao.ninja HTML endpoint
# (saved locally as WHO_COVID_19.csv) -- confirm.
df_html = pd.read_csv("data/WHO_COVID_19.csv")
# referring github source for json dataset
#url = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/latest/owid-covid-latest.json"
url="data/Covid19.json"
df=pd.read_json(url)
# Transforming data for appropriate structure
# (transpose so each JSON record becomes a row instead of a column)
df_t=df.T
# selecting required columns
selected_columns=[
"continent","location","total_cases","total_deaths",
    "total_cases_per_million",
    "total_deaths_per_million",
    "total_tests",
    "total_vaccinations",
    "population",
    "population_density",
    "median_age","aged_65_older",
    "aged_70_older",
    "gdp_per_capita",
    "extreme_poverty",
    "cardiovasc_death_rate",
    "diabetes_prevalence",
    "female_smokers",
    "male_smokers",
    "handwashing_facilities",
    "hospital_beds_per_thousand",
    "life_expectancy",
    "human_development_index"
]
# Cleanup by eliminating null values: keep only rows that have a continent
new_df=df_t[df_t.continent.isnull().values!=True]
# +
# selected columns in the dataset
# BUG FIX: the original selected the columns and then called
# df_json.rename(..., inplace=True) on that slice of new_df, which triggers
# pandas' SettingWithCopyWarning and may silently fail to rename.  Chaining
# .rename() (which returns a new frame) is the safe equivalent.
df_json = new_df[selected_columns].rename(columns={'location': 'country'})
df_json
# -
# Inspect the raw column names scraped from the HTML source.
df_html.columns
# +
# Renaming column names to short snake_case identifiers so the HTML source
# can be joined/compared with the CSV and JSON sources on 'country'.
df_html.columns=['country', 'region', 'cumulative_total',
                 'total_cases_per_1m_population',
                 'total_cases',
                 'total_cases_per_1m_population_in_7days',
                 'new_cases_in_24hrs',
                 'cumulative_deaths',
                 'cumulative_deaths_per_1m_population',
                 'new_deaths',
                 'new_deaths_per_1m_population',
                 'deaths_in_24_hrs',
                 'trans_class']
# -
df_html
# creating DB and moving data from DataFrames to DB tables
# ROBUSTNESS FIX: to_sql defaults to if_exists='fail', so re-running this
# notebook cell raised "table ... already exists".  'replace' makes the load
# idempotent while leaving the first run unchanged.
conn = sql.connect('COVID.db')
df_csv.to_sql('COVID_CSV', conn, if_exists='replace')
df_html.to_sql('COVID_HTML', conn, if_exists='replace')
df_json.to_sql('COVID_JSON', conn, if_exists='replace')
conn.close()  # later cells open their own connections
# +
# Retrieve the 3 worst-affected countries (cases and deaths per million) from the CSV source
conn = sql.connect('COVID.db')
c = conn.cursor()
res_CSV = c.execute("SELECT country,total_cases_per_1m_population ,total_deaths_per_1m_population FROM COVID_CSV order by total_cases_per_1m_population desc limit 3")
# Unpack the result rows into parallel lists for plotting.
country_csv = []
cases_csv = []
death_csv = []
for row in res_CSV:
    print (row)
    country_csv.append(row[0])
    cases_csv.append(row[1])
    death_csv.append(row[2])
# +
# Preparing two subplots to show country wise cases and deaths
plt.subplot(121)
plt.plot(country_csv,cases_csv)
plt.title("Country vs Cases")
plt.xlabel("Countries")
plt.ylabel("Cases Per Million")
plt.subplot(122)
plt.plot(country_csv,death_csv)
plt.xlabel("Countries")
plt.ylabel("Deaths Per Million")  # BUG FIX: this subplot plots death_csv; it was mislabelled "Cases Per Million"
plt.title("Country vs Deaths")
plt.show()
# -
# Bar chart of worst-affected countries (cases per million, CSV source)
# NOTE(review): 'Counteries' in the displayed title is a typo for 'Countries'.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(country_csv, cases_csv)
ax.set_ylabel('Cases Per Million')
ax.set_title('Top 3 Counteries ')
# Bar chart of worst-affected countries by death toll (CSV source)
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(country_csv, death_csv)
ax.set_ylabel('Deaths Per Million')  # BUG FIX: axis shows death_csv, not cases; label was "Cases Per Million"
ax.set_title('Top 3 Counteries ')
# Repeating the top-3 query for the HTML source (COVID_HTML table).
conn = sql.connect('COVID.db')
c = conn.cursor()
res_html = c.execute("SELECT country,total_cases_per_1m_population ,cumulative_deaths_per_1m_population FROM COVID_HTML order by total_cases_per_1m_population desc limit 3")
# Unpack the result rows into parallel lists for plotting.
country_html= []
cases_html = []
death_html = []
for row in res_html:
    print (row)
    country_html.append(row[0])
    cases_html.append(row[1])
    death_html.append(row[2])
# repeating plot exercise for HTML source
plt.subplot(121)
plt.plot(country_html,cases_html)
plt.title("Country vs Cases")
plt.xlabel("Countries")
plt.ylabel("Cases Per Million")
plt.subplot(122)
plt.plot(country_html,death_html)
plt.xlabel("Countries")
plt.ylabel("Deaths Per Million")  # BUG FIX: this subplot plots death_html; it was mislabelled "Cases Per Million"
plt.title("Country vs Deaths")
plt.show()
# Bar chart of worst-affected countries (cases per million, HTML source)
# NOTE(review): 'Counteries' in the displayed title is a typo for 'Countries'.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(country_html, cases_html)
ax.set_ylabel('Cases Per Million')
ax.set_title('Top 3 Counteries ')
# Bar chart of worst-affected countries by death toll (HTML source)
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(country_html, death_html)
ax.set_ylabel('Deaths Per Million')  # BUG FIX: axis shows death_html, not cases; label was "Cases Per Million"
ax.set_title('Top 3 Counteries ')
# Repeating the top-3 query for the JSON source (COVID_JSON table).
conn = sql.connect('COVID.db')
c = conn.cursor()
res_JSON = c.execute("SELECT country,total_cases_per_million ,total_deaths_per_million FROM COVID_JSON order by total_cases_per_million desc limit 3")
# Unpack the result rows into parallel lists for plotting.
country_JSON = []
cases_JSON = []
death_JSON = []
for row in res_JSON:
    print (row)
    country_JSON.append(row[0])
    cases_JSON.append(row[1])
    death_JSON.append(row[2])
# Repeating Plot exercise for JSON source
plt.subplot(121)
plt.plot(country_JSON,cases_JSON)
plt.title("Country vs Cases")
plt.xlabel("Countries")
plt.ylabel("Cases Per Million")
plt.subplot(122)
plt.plot(country_JSON,death_JSON)
plt.xlabel("Countries")
plt.ylabel("Deaths Per Million")  # BUG FIX: this subplot plots death_JSON; it was mislabelled "Cases Per Million"
plt.title("Country vs Deaths")
plt.show()
# Bar chart of worst-affected countries (cases per million, JSON source)
# NOTE(review): 'Counteries' in the displayed title is a typo for 'Countries'.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(country_JSON, cases_JSON)
ax.set_ylabel('Cases Per Million')
ax.set_title('Top 3 Counteries ')
# Bar chart of worst-affected countries by death toll (JSON source)
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(country_JSON, death_JSON)
ax.set_ylabel('Deaths Per Million')  # BUG FIX: axis shows death_JSON, not cases; label was "Cases Per Million"
ax.set_title('Top 3 Counteries ')
# +
# Combined scatterplot overlaying all three sources for the worst-affected
# countries (each source may rank a different set of countries, so the x-axis
# shows the union of the category labels).
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(country_csv, cases_csv, label='CSV')
ax1.scatter(country_html, cases_html, label='HTML')
ax1.scatter(country_JSON, cases_JSON,label='JSON')
plt.legend(loc='best');
plt.show()
# +
# Same overlay, but for the per-million death tolls.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(country_csv, death_csv, label='CSV')
ax1.scatter(country_html, death_html, label='HTML')
ax1.scatter(country_JSON, death_JSON,label='JSON')
plt.legend(loc='best');
plt.show()
# -
# ## Summary
#
# ### Topic
# This Project targets COVID 19 country wise data for covid cases and deaths.
# ### Data Sources :
# Project refers following 3 data sources for CSV , HTML and JSON data respectively :
# 1. https://www.kaggle.com/josephassaker/covid19-global-dataset
# 2. https://corona.lmao.ninja/v2/continents?yesterday=true&sort=
# 3. https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/latest/owid-covid-latest.json"
#
# ### Clean up Process :
# Step 1 was creating short, manageable column names: the original names were long and descriptive, and multi-word column names were hard to work with. All the columns were given appropriate captions (underscores "_" were used to
# separate the words).
#
# Step 2 - validated and fixed data for typo and other variations especially "Country" field which was common field to link all the data sources.
#
# Step 3 : Removed unnecessary and unused fields. Though project primarily focuses on Country, Cases and death data
# but still considering future scope of the project few additional columns remain there in the system.
#
#
# ### Repository :
# SQLite is used to store the data in structured form. Since the data sources are three different file formats, to
# identify the source, three separate DB tables are created to store and maintain the data. The tables are:
# 1. COVID_CSV
# 2. COVID_HTML
# 3. COVID_JSON
#
# ### Combining Data :
#
# Data is stored in three separate table but can be combined using a common field “Country". This is a Primary Key in
# all the three tables.
#
# ### Visualization :
# matplotlib.pyplot library used to create line plot , scatterplot and Bar to Demonstrate country wise break up
# of cases or deaths
#
# ### Challenge :
# Data coming from three different sources on different dates wasn't completely in sync. All three sources show
# some variation in the data.
# Data scraping from the HTML source in particular was a challenging task due to the deep nesting of 'div' elements in the data source.
#
# ### Scope of Improvement :
#
# There is scope of further cleaning and average out the numbers to overcome difference in values from all the
# three sources.
# The scope of the project was limited to the number of cases and deaths for the top 3 most affected countries. This could be further enhanced to cover all countries, or countries from a particular region, by factoring in other key parameters such as the country's population, economic status, and healthcare facilities.
#
# ### Take Away :
# Overall journey from basics of data frames, filtering , transforming and revisiting visualization technique was
# very helpful in achieving hands on experience.
# Got significant exposure on python/Sqllite
| Week_11_12/Ayachit_Madhukar_Milestone5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# **Unsupervised learning: basics**
# ___
# - How does Google group news articles?
# - Labeled vs. unlabeled data?
# - Points without additional information given are unlabeled
# - the goal of unsupervised learning is to find patterns and interpret any structures within the data
# - clustering, anomaly detection, neural networks
# - What is clustering?
# - process of grouping items with similar characteristics
# - items in groups more similar to each other than in other groups
# ___
# + pycharm={"name": "#%%\n"}
#Pokémon sightings
#There have been reports of sightings of rare, legendary Pokémon. You
#have been asked to investigate! Plot the coordinates of sightings to
#find out where the Pokémon might be. The X and Y coordinates of the
#points are stored in list x and y, respectively.
# Import plotting class from matplotlib library
from matplotlib import pyplot as plt
# Sighting coordinates: the first ten points sit near the origin and the
# last ten near (20-30, 25-30), i.e. two visually separate groups.
x = [9, 6, 2, 3, 1, 7, 1, 6, 1, 7, 23, 26, 25, 23, 21, 23, 23, 20, 30, 23]
y = [8, 4, 10, 6, 0, 4, 10, 10, 6, 1, 29, 25, 30, 29, 29, 30, 25, 27, 26, 30]
# Create a scatter plot
plt.scatter(x, y)
# Display the scatter plot
plt.show()
# -
# **Basics of cluster analysis**
# ___
# - **cluster** - A group of items with similar characteristics
# - Google News - articles where similar words and word associations appear together
# - Customer Segmentation
# - **clustering algorithms**
# - hierarchical clustering
# - means are compared, after each step number of clusters is reduced by 1.
# - K means clustering
# - random cluster center is created for each cluster, and recalculated (centroids) a set number of times
# - DBSCAN, Gaussian methods
# + pycharm={"name": "#%%\n"}
#Pokémon sightings: hierarchical clustering
#We are going to continue the investigation into the sightings of
#legendary Pokémon from the previous exercise. Remember that in the
#scatter plot of the previous exercise, you identified two areas
#where Pokémon sightings were dense. This means that the points seem
#to separate into two clusters. In this exercise, you will form two
#clusters of the sightings using hierarchical clustering.
from matplotlib import pyplot as plt
import seaborn as sns, pandas as pd
x = [9, 6, 2, 3, 1, 7, 1, 6, 1, 7, 23, 26, 25, 23, 21, 23, 23, 20, 30, 23]
y = [8, 4, 10, 6, 0, 4, 10, 10, 6, 1, 29, 25, 30, 29, 29, 30, 25, 27, 26, 30]
data_tuples = list(zip(x,y))
df = pd.DataFrame(data_tuples, columns=['x','y'])
# Import linkage and fcluster functions
from scipy.cluster.hierarchy import linkage, fcluster
# Use the linkage() function to compute distances ('ward' merges the pair of
# clusters that minimizes the increase in within-cluster variance)
Z = linkage(df, 'ward')
# Generate cluster labels: criterion='maxclust' cuts the tree into at most 2 flat clusters
df['cluster_labels'] = fcluster(Z, 2, criterion='maxclust')
# Plot the points with seaborn
# There are some problems in matplotlib 3.3.1. hue='cluster_labels' does not work or add legend title. These need to be done manually
# see https://github.com/mwaskom/seaborn/issues/2194
sns.scatterplot(x='x', y='y', hue=df.cluster_labels.tolist(), data=df)
plt.legend(title='cluster labels')
plt.show()
#cluster label 0 can be removed if you store cluster labels as strings
# + pycharm={"name": "#%%\n"}
#Pokémon sightings: k-means clustering
#We are going to continue the investigation into the sightings of
#legendary Pokémon from the previous exercise. Just like the previous
#exercise, we will use the same example of Pokémon sightings. In this
#exercise, you will form clusters of the sightings using k-means
#clustering.
from matplotlib import pyplot as plt
import seaborn as sns, pandas as pd
x = [9, 6, 2, 3, 1, 7, 1, 6, 1, 7, 23, 26, 25, 23, 21, 23, 23, 20, 30, 23]
y = [8, 4, 10, 6, 0, 4, 10, 10, 6, 1, 29, 25, 30, 29, 29, 30, 25, 27, 26, 30]
#vq requires double or float type. Therefore type coercion is required
x = [float(i) for i in x]
y = [float(i) for i in y]
data_tuples = list(zip(x,y))
df = pd.DataFrame(data_tuples, columns=['x','y'])
# Import kmeans and vq functions
from scipy.cluster.vq import kmeans, vq
# Compute cluster centers; kmeans() returns (centroids, distortion) --
# the distortion scalar is discarded here.  NOTE(review): kmeans uses random
# initialization, so the label numbering can vary between runs.
centroids,_ = kmeans(df, 2)
# Assign cluster labels; vq() returns (labels, per-point distortions)
df['cluster_labels'],_ = vq(df, centroids)
# Plot the points with seaborn
sns.scatterplot(x='x', y='y', hue=df.cluster_labels.tolist(), data=df)
plt.legend(title='cluster labels')
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# **Data preparation for cluster analysis**
# ___
# - Why do we need to prepare data for clustering?
# - variables have incomparable units
# - variables with same units have different scales and variances
# - data in raw form may lead to bias in clustering
# - clusters may be heavily dependent on one variable
# - Solution: normalization of individual variables
# - Normalization of data
# - process of rescaling data to a standard deviation of 1
# - x_new = x/std_dev(x)
# - from scipi.cluster.vq import whiten
# ___
# + pycharm={"name": "#%%\n"}
# Normalize basic list data
# goals_for is the number of goals a football team scored in each of its
# last ten matches.  scipy's whiten() standardizes the data by dividing
# every value by the standard deviation of the whole list, so the scaled
# data has unit variance.
from scipy.cluster.vq import whiten

goals_for = [4, 3, 2, 3, 1, 1, 2, 0, 1, 4]
scaled_data = whiten(goals_for)
print(scaled_data)
# + pycharm={"name": "#%%\n"}
#Visualize normalized data
#After normalizing your data, you can compare the scaled data to
#the original data to see the difference.
from matplotlib import pyplot as plt
from scipy.cluster.vq import whiten
goals_for = [4,3,2,3,1,1,2,0,1,4]
# Use the whiten() function to standardize the data (divide by the std dev)
scaled_data = whiten(goals_for)
# Plot original data
plt.plot(goals_for, label='original')
# Plot scaled data (same shape as the original, just rescaled)
plt.plot(scaled_data, label='scaled')
# Show the legend in the plot
plt.legend()
# Display the plot
plt.show()
# + pycharm={"name": "#%%\n"}
#Normalization of small numbers
#In earlier examples, you have normalization of whole numbers. In
#this exercise, you will look at the treatment of fractional numbers
#- the change of interest rates in the country of Bangalla over the years.
from matplotlib import pyplot as plt
from scipy.cluster.vq import whiten
# Prepare data: fractional interest-rate changes (both positive and negative)
rate_cuts = [0.0025, 0.001, -0.0005, -0.001, -0.0005, 0.0025, -0.001, -0.0015, -0.001, 0.0005]
# Use the whiten() function to standardize the data
scaled_data = whiten(rate_cuts)
# Plot original data (nearly flat at this scale)
plt.plot(rate_cuts, label='original')
# Plot scaled data (unit variance, so the variation becomes visible)
plt.plot(scaled_data, label='scaled')
plt.legend()
plt.show()
# + pycharm={"name": "#%%\n"}
#FIFA 18: Normalize data
#FIFA 18 is a football video game that was released in 2017 for PC and
#consoles. The dataset that you are about to work on contains data on
#the 1000 top individual players in the game. You will explore various
#features of the data as we move ahead in the course. In this exercise,
#you will work with two columns, eur_wage, the wage of a player in Euros
#and eur_value, their current transfer market value.
#The data for this exercise is stored in a Pandas dataframe, fifa.
#from matplotlib import pyplot as plt
#from scipy.cluster.vq import whiten
# Scale wage and value
#fifa['scaled_wage'] = whiten(fifa['eur_wage'])
#fifa['scaled_value'] = whiten(fifa['eur_value'])
# Plot the two columns in a scatter plot
#fifa.plot(x='scaled_wage', y='scaled_value', kind = 'scatter')
#plt.show()
# Check mean and standard deviation of scaled values
#print(fifa[['scaled_wage', 'scaled_value']].describe())
#################################################
#<script.py> output:
# scaled_wage scaled_value
# count 1000.00 1000.00
# mean 1.12 1.31
# std 1.00 1.00
# min 0.00 0.00
# 25% 0.47 0.73
# 50% 0.85 1.02
# 75% 1.41 1.54
# max 9.11 8.98
#################################################
# -
# 
# **Basics of hierarchical clustering**
# ___
# - create a distance matrix using scipy.cluster.hierarchy.linkage
# - parameters:
# - **observations** - data
# - **method** - how to calculate the proximity of clusters
# - *single* - based on two closest objects
# - *complete* - based on two farthest objects
# - *average* - based on arithmetic mean of all objects
# - *centroid* - based on geometric mean of all objects
# - *median* - based on the median of all objects
#     - *ward* - based on the sum of squares
# - **metric** - distance metric
# - **optimal ordering** - boolean
# - create cluster labels using scipy.cluster.hierarchy.fcluster
# - parameters:
# - **distance_matrix** - output of linkage() method
# - **num_clusters** - number of clusters
# - **criterion** - how to decide thresholds to form clusters
# ___
# + pycharm={"name": "#%%\n"}
#Hierarchical clustering: ward method
#It is time for Comic-Con! Comic-Con is an annual comic-based convention
#held in major cities in the world. You have the data of last year's
#footfall, the number of people at the convention ground at a given
#time. You would like to decide the location of your stall to maximize
#sales. Using the ward method, apply hierarchical clustering to find
#the two points of attraction in the area.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time.
# Import the fcluster and linkage functions
#from scipy.cluster.hierarchy import fcluster, linkage
# Use the linkage() function
#distance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method = 'ward', metric = 'euclidean')
# Assign cluster labels
#comic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')
# Plot clusters
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = comic_con)
#plt.show()
# -
# 
# + pycharm={"name": "#%%\n"}
#Hierarchical clustering: single method
#Let us use the same footfall dataset and check if any changes are
#seen if we use a different method for clustering.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time.
# Import the fcluster and linkage functions
#from scipy.cluster.hierarchy import fcluster, linkage
# Use the linkage() function
#distance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method = 'single', metric = 'euclidean')
# Assign cluster labels
#comic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')
# Plot clusters
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = comic_con)
#plt.show()
# -
# 
# + pycharm={"name": "#%%\n"}
#Hierarchical clustering: complete method
#For the third and final time, let us use the same footfall dataset
#and check if any changes are seen if we use a different method for
#clustering.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time.
# Import the fcluster and linkage functions
#from scipy.cluster.hierarchy import fcluster, linkage
# Use the linkage() function
#distance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method = 'complete', metric = 'euclidean')
# Assign cluster labels
#comic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')
# Plot clusters
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = comic_con)
#plt.show()
# -
# 
# **Visualize clusters**
# ___
# - Why visualize clusters?
# - try to make sense of clusters formed
# - an additional step in validation of clusters
# - spot trends in data
#
# + pycharm={"name": "#%%\n"}
#Visualize clusters with matplotlib
#We have discussed that visualizations are necessary to assess the
#clusters that are formed and spot trends in your data. Let us now
#focus on visualizing the footfall dataset from Comic-Con using the
#matplotlib module.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time. cluster_labels has the cluster labels.
#A linkage object is stored in the variable distance_matrix.
#Import the pyplot class
#from matplotlib import pyplot as plt
# Define a colors dictionary for clusters
#colors = {1:'red', 2:'blue'}
# Plot a scatter plot
#comic_con.plot.scatter(x='x_scaled',
# y='y_scaled',
# c=comic_con['cluster_labels'].apply(lambda x: colors[x]))
#plt.show()
# -
# 
# + pycharm={"name": "#%%\n"}
#Visualize clusters with seaborn
#Let us now visualize the footfall dataset from Comic Con using the
#seaborn module. Visualizing clusters using seaborn is easier with
#the hue function for cluster labels.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time. cluster_labels has the cluster
#labels. A linkage object is stored in the variable distance_matrix.
# Import the seaborn module
#import seaborn as sns
# Plot a scatter plot using seaborn
#sns.scatterplot(x='x_scaled',
# y='y_scaled',
# hue='cluster_labels',
# data=comic_con)
#plt.show()
# -
# 
# **How many clusters?**
# ___
# - introduction to dendrograms
# - from scipy.cluster.hierarchy import dendrogram
# ___
# + pycharm={"name": "#%%\n"}
#Create a dendrogram
#Dendrograms are branching diagrams that show the merging of clusters
#as we move through the distance matrix. Let us use the Comic Con
#footfall data to create a dendrogram.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time. cluster_labels has the cluster
#labels. A linkage object is stored in the variable distance_matrix.
# Import the dendrogram function
#from scipy.cluster.hierarchy import dendrogram
# Create a dendrogram
#dn = dendrogram(distance_matrix)
# Display the dendrogram
#plt.show()
# -
# 
# **Limitations of hierarchical clustering**
# ___
# - measuring speed in hierarchical clustering
# - *timeit* module
# - measure speed of *.linkage()* module
# - there is a quadratic increase in runtime, making it infeasible for large data sets
# ___
# + pycharm={"name": "#%%\n"}
#FIFA 18: exploring defenders
#In the FIFA 18 dataset, various attributes of players are present.
#Two such attributes are:
#sliding tackle: a number between 0-99 which signifies how accurate
#a player is able to perform sliding tackles
#aggression: a number between 0-99 which signifies the commitment
#and will of a player
#These are typically high in defense-minded players. In this exercise,
#you will perform clustering based on these attributes in the data.
#This data consists of 5000 rows, and is considerably larger than
#earlier datasets. Running hierarchical clustering on this data can
#take up to 10 seconds.
#The following modules are pre-loaded: dendrogram, linkage, fcluster
#from scipy.cluster.hierarchy, matplotlib.pyplot as plt, seaborn as
#sns. The data is stored in a Pandas dataframe, fifa.
# Fit the data into a hierarchical clustering algorithm
#distance_matrix = linkage(fifa[['scaled_sliding_tackle', 'scaled_aggression']], 'ward')
# Assign cluster labels to each row of data
#fifa['cluster_labels'] = fcluster(distance_matrix, 3, criterion='maxclust')
# Display cluster centers of each cluster
#print(fifa[['scaled_sliding_tackle', 'scaled_aggression', 'cluster_labels']].groupby('cluster_labels').mean())
# Create a scatter plot through seaborn
#sns.scatterplot(x='scaled_sliding_tackle', y='scaled_aggression', hue='cluster_labels', data=fifa)
#plt.show()
#################################################
#<script.py> output:
# scaled_sliding_tackle scaled_aggression
# cluster_labels
# 1 2.99 4.35
# 2 0.74 1.94
# 3 1.34 3.62
#################################################
# -
# 
# **Basics of k-means clustering**
# ___
# - Why k-means clustering?
# - a critical drawback of hierarchical clustering: runtime
# - K means runs significantly faster on large datasets
# - Step 1: Generate cluster centers
# - kmeans(obs, k_or_guess, iter, thresh, check_finite)
# - obs: - standardized observations
# - k_or_guess: - number of clusters
# - iter: - number of iterations (default 20)
# - thresh: - threshold based on distortions (default 1e-05)
# - check_finite: - whether to check if observations contain only finite numbers (default: True)
# - returns two objects: cluster centers (code_book), distortion
# - how is distortion calculated?
# - sum of square distances between data points and cluster centers
# - Step 2: Generate cluster labels
# - vq(obs, code_book, check_finite=True)
# - obs: - standardized observations
# - code_book: - cluster centers
# - check_finite - whether to check if observations contain only finite numbers (default: True)
# - returns two objects: a list of cluster labels, a list of distortions
# - A note on distortions
# - kmeans - returns a single value of distortions based on the data
# - vq - returns a list of distortions one for each data point (mean approximates kmeans distortion)
# ___
# + pycharm={"name": "#%%\n"}
#K-means clustering: first exercise
#This exercise will familiarize you with the usage of k-means
#clustering on a dataset. Let us use the Comic Con dataset and check
#how k-means clustering works on it.
#Recall the two steps of k-means clustering:
#Define cluster centers through kmeans() function. It has two required
#arguments: observations and number of clusters.
#Assign cluster labels through the vq() function. It has two required
#arguments: observations and cluster centers.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time.
# Import the kmeans and vq functions
#from scipy.cluster.vq import kmeans, vq
# Generate cluster centers
#cluster_centers, distortion = kmeans(comic_con[['x_scaled', 'y_scaled']], 2)
# Assign cluster labels
#comic_con['cluster_labels'], distortion_list = vq(comic_con[['x_scaled', 'y_scaled']], cluster_centers)
# Plot clusters
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = comic_con)
#plt.show()
#NOTE: runtime is 50 ms vs 5 seconds for hierarchical clustering
# -
# 
# + [markdown] pycharm={"name": "#%% md\n"}
# **How many clusters?**
# ___
# - How to find the right k?
# - no absolute method to find right number of clusters (k) in k-means clustering
# - elbow plot method
# - line plot between cluster centers (x-axis) and distortion (y-axis)
# - elbow plot helps indicate number of clusters present in data
# - only gives an indication of optimal k (numbers of clusters)
# - does not always pinpoint how many k (numbers of clusters)
# - other methods: average silhouette, gap statistic
# - distortions revisited
# - sum of squares between each data point and cluster center for each cluster
# - decreases with an increasing number of clusters
# - becomes zero when the number of clusters equals the number of points
# ___
# + pycharm={"name": "#%%\n"}
#Elbow method on distinct clusters
#Let us use the comic con data set to see how the elbow plot looks
#on a data set with distinct, well-defined clusters. You may want
#to display the data points before proceeding with the exercise.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y coordinates
#of people at a given point in time.
#distortions = []
#num_clusters = range(1, 7)
# Create a list of distortions from the kmeans function
#for i in num_clusters:
# cluster_centers, distortion = kmeans(comic_con[['x_scaled', 'y_scaled']], i)
# distortions.append(distortion)
# Create a data frame with two lists - num_clusters, distortions
#elbow_plot = pd.DataFrame({'num_clusters': num_clusters, 'distortions': distortions})
# Create a line plot of num_clusters and distortions
#sns.lineplot(x='num_clusters', y='distortions', data = elbow_plot)
#plt.xticks(num_clusters)
#plt.show()
# -
# 
# + pycharm={"name": "#%%\n"}
#Elbow method on uniform data
#In the earlier exercise, you constructed an elbow plot on data with
#well-defined clusters. Let us now see how the elbow plot looks on a
#data set with uniformly distributed points. You may want to display
#the data points on the console before proceeding with the exercise.
#The data is stored in a Pandas data frame, uniform_data. x_scaled
#and y_scaled are the column names of the standardized X and Y
#coordinates of points.
#distortions = []
#num_clusters = range(2, 7)
# Create a list of distortions from the kmeans function
#for i in num_clusters:
# cluster_centers, distortion = kmeans(uniform_data[['x_scaled', 'y_scaled']], i)
# distortions.append(distortion)
# Create a data frame with two lists - number of clusters and distortions
#elbow_plot = pd.DataFrame({'num_clusters': num_clusters, 'distortions': distortions})
# Create a line plot of num_clusters and distortions
#sns.lineplot(x='num_clusters', y='distortions', data=elbow_plot)
#plt.xticks(num_clusters)
#plt.show()
# -
# 
# **Limitations of k-means clustering**
# ___
# - How to find the right number of k (number of clusters)?
# - impact of seeds
# - set random number generator seed for consistency across multiple iteration of same code
# - *from numpy import random* \\ *random.seed(12)*
# - biased towards equal sized clusters
# - hierarchical clustering does not
# + pycharm={"name": "#%%\n"}
#Impact of seeds on distinct clusters
#You noticed the impact of seeds on a dataset that did not have
#well-defined groups of clusters. In this exercise, you will explore
#whether seeds impact the clusters in the Comic Con data, where the
#clusters are well-defined.
#The data is stored in a Pandas data frame, comic_con. x_scaled and
#y_scaled are the column names of the standardized X and Y
#coordinates of people at a given point in time.
# Import random class
#from numpy import random
# Initialize seed
#random.seed(0)
# Run kmeans clustering
#cluster_centers, distortion = kmeans(comic_con[['x_scaled', 'y_scaled']], 2)
#comic_con['cluster_labels'], distortion_list = vq(comic_con[['x_scaled', 'y_scaled']], cluster_centers)
# Plot the scatterplot
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = comic_con)
#plt.show()
# -
# 
# + pycharm={"name": "#%%\n"}
#Change your code from the earlier step so that the seed is
#initialized with a list [1, 2, 1000].
# Import random class
#from numpy import random
# Initialize seed
#random.seed([1, 2, 1000])
# Run kmeans clustering
#cluster_centers, distortion = kmeans(comic_con[['x_scaled', 'y_scaled']], 2)
#comic_con['cluster_labels'], distortion_list = vq(comic_con[['x_scaled', 'y_scaled']], cluster_centers)
# Plot the scatterplot
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = comic_con)
#plt.show()
# -
# 
# ___
# Notice that the plots have not changed after changing the seed as the clusters are well-defined.
# ___
# **Uniform clustering patterns**
# ___
# Now that you are familiar with the impact of seeds, let us look at
# the bias in k-means clustering towards the formation of uniform
# clusters.
#
# Let us use a mouse-like dataset for our next exercise. A mouse-like
# dataset is a group of points that resemble the head of a mouse: it
# has three clusters of points arranged in circles, one each for the
# face and two ears of a mouse.
#
# Here is what a typical mouse-like dataset looks like:
#
# <img src="https://www.researchgate.net/profile/Simone_Ludwig/publication/256378655/figure/fig3/AS:667689227939842@1536200925583/Clustering-results-for-the-Mouse-data-set-where-the-black-boxes-represent-the-centroids.ppm" alt="Mouse" width="500" style="vertical-align:middle">
# ___
# + pycharm={"name": "#%%\n"}
# Generate cluster centers
#cluster_centers, distortion = kmeans(mouse[['x_scaled', 'y_scaled']], 3)
# Assign cluster labels
#mouse['cluster_labels'], distortion_list = vq(mouse[['x_scaled', 'y_scaled']], cluster_centers)
# Plot clusters
#sns.scatterplot(x='x_scaled', y='y_scaled',
# hue='cluster_labels', data = mouse)
#plt.show()
# -
# 
# ___
# Notice that kmeans is unable to capture the three visible clusters
# clearly, and the two clusters towards the top have taken in some
# points along the boundary. This happens due to the underlying
# assumption in kmeans algorithm to minimize distortions which leads
# to clusters that are similar in terms of area.
# ___
# + pycharm={"name": "#%%\n"}
#FIFA 18: defenders revisited
#In the FIFA 18 dataset, various attributes of players are present.
#Two such attributes are:
#defending: a number which signifies the defending attributes of a player
#physical: a number which signifies the physical attributes of a player
#These are typically defense-minded players. In this exercise, you
#will perform clustering based on these attributes in the data.
#The following modules have been pre-loaded: kmeans, vq from
#scipy.cluster.vq, matplotlib.pyplot as plt, seaborn as sns. The
#data for this exercise is stored in a Pandas dataframe, fifa. The
#scaled variables are scaled_def and scaled_phy.
# Set up a random seed in numpy
#random.seed([1000,2000])
# Fit the data into a k-means algorithm
#cluster_centers,_ = kmeans(fifa[['scaled_def', 'scaled_phy']], 3)
# Assign cluster labels
#fifa['cluster_labels'],_ = vq(fifa[['scaled_def', 'scaled_phy']], cluster_centers)
# Display cluster centers
#print(fifa[['scaled_def', 'scaled_phy', 'cluster_labels']].groupby('cluster_labels').mean())
#################################################
#<script.py> output:
# scaled_def scaled_phy
# cluster_labels
# 0 3.74 8.87
# 1 1.87 7.08
# 2 2.10 8.94
#################################################
# Create a scatter plot through seaborn
#sns.scatterplot(x='scaled_def', y='scaled_phy', hue='cluster_labels', data=fifa)
#plt.show()
# -
# 
# ___
# Notice that the seed has an impact on clustering as the data is
# uniformly distributed.
# ___
# **Dominant colors in images**
# ___
# - all images consist of pixels
# - each pixel has three values: red, green, and blue (0-255)
# - pixel color: combination of these RGB values
# - perform k-means on standardized RGB values to find cluster centers
# - uses: identifying features in satellite images
# - tools to find dominant colors
# - convert image to pixels: matplotlib.image.imread
# - display colors of cluster centers: matplotlib.pyplot.imshow
# ___
# + [markdown] pycharm={"name": "#%% md\n"}
# **Extract RGB values from image**
# ___
# There are broadly three steps to find the dominant colors in an image:
#
# - Extract RGB values into three lists.
# - Perform k-means clustering on scaled RGB values.
# - Display the colors of cluster centers.
#
# To extract RGB values, we use the imread() function of the image
# class of matplotlib. Empty lists, r, g and b have been initialized.
#
# For the purpose of finding dominant colors, we will be using the
# following image.
#
# 
# ___
# + pycharm={"name": "#%%\n"}
# Import image class of matplotlib
import matplotlib.image as img

# Empty lists that will collect the red, green and blue channel values.
r, g, b = [], [], []

# Read batman image and print dimensions
batman_image = img.imread('_images/12.1.jpg')
print(batman_image.shape)

# Store RGB values of all pixels in lists r, g and b:
# walk the image row by row and split each pixel into its channels.
for row in batman_image:
    for pixel in row:
        channel_r, channel_g, channel_b = pixel
        r.append(channel_r)
        g.append(channel_g)
        b.append(channel_b)
# output is an m x n matrix
# -
# **How many dominant colors?**
# ___
# We have loaded the following image using the imread() function of the image class of matplotlib.
# 
# The RGB values are stored in a data frame, batman_df.
# The RGB values have been standardized using the whiten() function,
# stored in columns, scaled_red, scaled_blue and scaled_green.
#
# Construct an elbow plot with the data frame. How many dominant
# colors are present?
# ___
# + pycharm={"name": "#%%\n"}
# Import image class of matplotlib
from numpy import random
from scipy.cluster.vq import vq, kmeans, whiten
import matplotlib.image as img
import matplotlib.pyplot as plt
import pandas as pd, seaborn as sns

# Fix the RNG state so the kmeans initialization is reproducible.
random.seed(123)

# Collect the color channels of every pixel into three flat lists.
r, g, b = [], [], []
batman_image = img.imread('_images/12.1.jpg')
for row in batman_image:
    for pixel in row:
        channel_r, channel_g, channel_b = pixel
        r.append(channel_r)
        g.append(channel_g)
        b.append(channel_b)

# One row per pixel, one column per color channel.
batman_df = pd.DataFrame({'red': r, 'blue': b, 'green': g})

# Standardize each channel (whiten divides by its standard deviation).
for channel in ('red', 'blue', 'green'):
    batman_df['scaled_' + channel] = whiten(batman_df[channel])

# Distortion for k = 1..6 clusters; kmeans returns (centers, distortion).
num_clusters = range(1, 7)
distortions = [
    kmeans(batman_df[['scaled_red', 'scaled_blue', 'scaled_green']], k)[1]
    for k in num_clusters
]

# Elbow plot: distortion against number of clusters.
elbow_plot = pd.DataFrame({'num_clusters': num_clusters, 'distortions': distortions})
sns.lineplot(x='num_clusters', y='distortions', data=elbow_plot)
plt.xticks(num_clusters)
plt.show()
# Notice that there are three distinct colors present in the image, which is supported by the elbow plot.
# -
# **Display dominant colors**
# ___
# We have loaded the following image using the imread() function of
# the image class of matplotlib.
# 
# To display the dominant colors, convert the colors of the cluster
# centers to their raw values and then converted them to the range
# of 0-1, using the following formula:
# **converted_pixel = standardized_pixel * pixel_std / 255**
#
# The RGB values are stored in a data frame, batman_df. The scaled
# RGB values are stored in columns, scaled_red, scaled_blue and
# scaled_green. The cluster centers are stored in the variable
# cluster_centers, which were generated using the kmeans() function
# with three clusters.
# ___
# + pycharm={"name": "#%%\n"}
# Import image class of matplotlib
from numpy import random
from scipy.cluster.vq import vq, kmeans, whiten
import matplotlib.image as img
import matplotlib.pyplot as plt
import pandas as pd, seaborn as sns
# Initialize seed
random.seed(123)
r, g, b = ([] for i in range(3))
# Read batman image
batman_image = img.imread('_images/12.1.jpg')
# Store RGB values of all pixels in lists r, g and b
for row in batman_image:
    for temp_r, temp_g, temp_b in row:
        r.append(temp_r)
        g.append(temp_g)
        b.append(temp_b)
batman_df = pd.DataFrame(list(zip(r, b , g)), columns=['red','blue','green'])
# Scale r, g, and b
batman_df['scaled_red'] = whiten(batman_df['red'])
batman_df['scaled_blue'] = whiten(batman_df['blue'])
batman_df['scaled_green'] = whiten(batman_df['green'])
# kmeans function with 3 clusters
cluster_centers, distortion = kmeans(batman_df[['scaled_red', 'scaled_blue', 'scaled_green']], 3)
# Get standard deviations of each color
r_std, g_std, b_std = batman_df[['red', 'green', 'blue']].std()
colors = []
for cluster_center in cluster_centers:
    # BUG FIX: kmeans was fed the columns in (red, blue, green) order, so
    # each cluster center unpacks in that same order.  The original code
    # unpacked the middle value as green, swapping the green and blue
    # channels of every displayed color.
    scaled_r, scaled_b, scaled_g = cluster_center
    # Convert each standardized value back to a raw value, then to the
    # 0-1 range that imshow expects: standardized * std / 255.
    colors.append((
        scaled_r * r_std / 255,
        scaled_g * g_std / 255,
        scaled_b * b_std / 255
    ))
# Display colors of cluster centers (one RGB swatch per cluster)
plt.imshow([colors])
plt.show()
# -
# **Document clustering**
# ___
# - concepts
# - clean data before processing
# - remove punctuation, emoticons, words such as "the" "is" "are"
# - determine the importance of terms in a document (in TF-IDF matrix)
# - cluster TF-IDF matrix
# - find top terms, documents in each cluster
# -
# + pycharm={"name": "#%%\n"}
#TF-IDF of movie plots
#Let us use the plots of randomly selected movies to perform
#document clustering on. Before performing clustering on documents,
#they need to be cleaned of any unwanted noise (such as special
#characters and stop words) and converted into a sparse matrix
#through TF-IDF of the documents.
#Use the TfidfVectorizer class to perform the TF-IDF of movie
#plots stored in the list plots. The remove_noise() function is
#available to use as a tokenizer in the TfidfVectorizer class.
#The .fit_transform() method fits the data into the
#TfidfVectorizer objects and then generates the TF-IDF sparse
#matrix.
#Note: It takes a few seconds to run the .fit_transform() method.
# Import TfidfVectorizer class from sklearn
#from sklearn.feature_extraction.text import TfidfVectorizer
# Initialize TfidfVectorizer
#tfidf_vectorizer = TfidfVectorizer(max_df=0.75, max_features=50,
# min_df=0.1, tokenizer=remove_noise)
# Use the .fit_transform() method on the list plots
#tfidf_matrix = tfidf_vectorizer.fit_transform(plots)
# + pycharm={"name": "#%%\n"}
#Top terms in movie clusters
#Now that you have created a sparse matrix, generate cluster
#centers and print the top three terms in each cluster. Use the
#.todense() method to convert the sparse matrix, tfidf_matrix to
#a normal matrix for the kmeans() function to process. Then, use
#the .get_feature_names() method to get a list of terms in the
#tfidf_vectorizer object. The zip() function in Python joins two
#lists.
#The tfidf_vectorizer object and sparse matrix, tfidf_matrix, from
#the previous have been retained in this exercise. kmeans has been
#imported from SciPy.
#With a higher number of data points, the clusters formed would be
#defined more clearly. However, this requires some computational
#power, making it difficult to accomplish in an exercise here.
#num_clusters = 2
# Generate cluster centers through the kmeans function
#cluster_centers, distortion = kmeans(tfidf_matrix.todense(), num_clusters)
# Generate terms from the tfidf_vectorizer object
#terms = tfidf_vectorizer.get_feature_names()
#for i in range(num_clusters):
# Sort the terms and print top 3 terms
# center_terms = dict(zip(terms, list(cluster_centers[i])))
# sorted_terms = sorted(center_terms, key=center_terms.get, reverse=True)
# print(sorted_terms[:3])
#################################################
#<script.py> output:
# ['father', 'back', 'one']
# ['police', 'man', 'killed']
#################################################
#Notice positive, warm words in the first cluster and words
#referring to action in the second cluster.
# -
# **Clustering with multiple features**
# ___
# - Visualizations
# - visualize cluster centers
# - visualize other variables for each cluster
# - Feature reduction
# - factor analysis
# - multidimensional scaling
# ___
# + pycharm={"name": "#%%\n"}
#Basic checks on clusters
#In the FIFA 18 dataset, we have concentrated on defenders in
#previous exercises. Let us try to focus on attacking attributes
#of a player. Pace (pac), Dribbling (dri) and Shooting (sho) are
#features that are present in attack minded players. In this
#exercise, k-means clustering has already been applied on the data
#using the scaled values of these three attributes. Try some basic
#checks on the clusters so formed.
#The data is stored in a Pandas data frame, fifa. The scaled column
#names are present in a list scaled_features. The cluster labels are
#stored in the cluster_labels column. Recall the .count() and .mean()
#methods in Pandas help you find the number of observations and mean
#of observations in a data frame.
# Print the size of the clusters
#print(fifa.groupby('cluster_labels')['ID'].count())
# Print the mean value of wages in each cluster
#print(fifa.groupby('cluster_labels')['eur_wage'].mean())
#################################################
#<script.py> output:
# cluster_labels
# 0 83
# 1 107
# 2 60
# Name: ID, dtype: int64
# cluster_labels
# 0 132108.43
# 1 130308.41
# 2 117583.33
# Name: eur_wage, dtype: float64
#################################################
#In this example, the cluster sizes are not very different, and
#there are no significant differences that can be seen in the wages.
#Further analysis is required to validate these clusters.
# -
# **FIFA 18: what makes a complete player?**
# ___
# The overall level of a player in FIFA 18 is defined by six characteristics:
# pace (pac), shooting (sho), passing (pas), dribbling (dri), defending (def), physical (phy).
#
# Here is a sample card:
# 
# ___
# + pycharm={"name": "#%%\n"}
#In this exercise, you will use all six characteristics to create
#clusters. The data for this exercise is stored in a Pandas
#dataframe, fifa. features is the list of these column names and
#scaled_features is the list of columns which contains their scaled
#values. The following have been pre-loaded: kmeans, vq from
#scipy.cluster.vq, matplotlib.pyplot as plt, seaborn as sns.
#Before you start the exercise, you may wish to explore
#scaled_features in the console to check out the list of six
#scaled columns names.
#################################################
#In [2]: scaled_features
#Out[2]:
#['scaled_pac',
# 'scaled_sho',
# 'scaled_pas',
# 'scaled_dri',
# 'scaled_def',
# 'scaled_phy']
#################################################
# Create centroids with kmeans for 2 clusters
#cluster_centers,_ = kmeans(fifa[scaled_features], 2)
# Assign cluster labels and print cluster centers
#fifa['cluster_labels'], _ = vq(fifa[scaled_features], cluster_centers)
#print(fifa.groupby('cluster_labels')[scaled_features].mean())
#################################################
#<script.py> output:
# scaled_pac scaled_sho scaled_pas scaled_dri scaled_def \
# cluster_labels
# 0 6.68 5.43 8.46 8.51 2.50
# 1 5.44 3.66 7.17 6.76 3.97
#
# scaled_phy
# cluster_labels
# 0 8.34
# 1 9.21
#################################################
# Plot cluster centers to visualize clusters
#fifa.groupby('cluster_labels')[scaled_features].mean().plot(legend=True, kind='bar')
# Get the name column of first 5 players in each cluster
#for cluster in fifa['cluster_labels'].unique():
# print(cluster, fifa[fifa['cluster_labels'] == cluster]['name'].values[:5])
#################################################
# 0 ['<NAME>' '<NAME>' 'Neymar' '<NAME>' '<NAME>']
# 1 ['<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>']
#################################################
#The data was sorted before you performed the clustering. Notice
#the top players in each cluster are representative of the overall
#characteristics of the cluster - one of the clusters primarily
#represents attackers, whereas the other represents defenders.
#Surprisingly, a top goalkeeper <NAME> is seen in the attackers
#group, but he is known for going out of the box and participating
#in open play, which are reflected in his FIFA 18 attributes.
# -
# 
# **Farewell!**
# ___
# - What comes next?
# - clustering is one of the exploratory steps
# - practice!
# ___
| datacamp/cluster analysis in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <img src="fuellogo.svg" style="float:left; padding-right:1em;" width=150 />
#
# # AIRPLANE FUEL
# *Minimize fuel burn for a plane that can sprint and land quickly.*
#
# ### Set up the modelling environment
#
# First we'll import GPkit and turn on $\LaTeX$ printing for GPkit variables and equations.
# + slideshow={"slide_type": "slide"}
import numpy as np
from gpkit.shortcuts import *
import gpkit.interactive
# %matplotlib inline
# -
# ### declare constants
# + slideshow={"slide_type": "slide"}
# Fixed design constants for the GP model.
# Each Var is (LaTeX name, value, units, description).
N_lift = Var("N_{lift}", 6.0, "-", "Wing loading multiplier")
pi = Var("\\pi", np.pi, "-", "Half of the circle constant")
sigma_max = Var("\\sigma_{max}", 250e6, "Pa", "Allowable stress, 6061-T6")
sigma_maxshear = Var("\\sigma_{max,shear}", 167e6, "Pa", "Allowable shear stress")
g = Var("g", 9.8, "m/s^2", "Gravitational constant")
w = Var("w", 0.5, "-", "Wing-box width/chord")
r_h = Var("r_h", 0.75, "-", "Wing strut taper parameter")
f_wadd = Var("f_{wadd}", 2, "-", "Wing added weight fraction")
W_fixed = Var("W_{fixed}", 14.7e3, "N", "Fixed weight")
C_Lmax = Var("C_{L,max}", 1.5, "-", "Maximum C_L, flaps down")
rho = Var("\\rho", 0.91, "kg/m^3", "Air density, 3000m")
rho_sl = Var("\\rho_{sl}", 1.23, "kg/m^3", "Air density, sea level")
rho_alum = Var("\\rho_{alum}", 2700, "kg/m^3", "Density of aluminum")
mu = Var("\\mu", 1.69e-5, "kg/m/s", "Dynamic viscosity, 3000m")
e = Var("e", 0.95, "-", "Wing spanwise efficiency")
A_prop = Var("A_{prop}", 0.785, "m^2", "Propeller disk area")
eta_eng = Var("\\eta_{eng}", 0.35, "-", "Engine efficiency")
eta_v = Var("\\eta_v", 0.85, "-", "Propeller viscous efficiency")
h_fuel = Var("h_{fuel}", 42e6, "J/kg", "fuel heating value")
V_sprint_reqt = Var("V_{sprintreqt}", 150, "m/s", "sprint speed requirement")
# NOTE(review): W_pay is redeclared as a free Var in the next cell, which
# shadows this constant -- confirm which one the model is meant to use.
W_pay = Var("W_{pay}", 500*9.81, "N")
R_min = Var("R_{min}", 1e6, "m", "Minimum airplane range")
V_stallmax = Var("V_{stall,max}", 40, "m/s", "Stall speed")
# sweep variables
# NOTE: these two assignments rebind R_min and V_stallmax, overriding the
# values declared just above (R_min ends up 5e6 m, not 1e6 m).
R_min = Var("R_{min}", 5e6, "m", "Minimum airplane range")
V_stallmax = Var("V_{stall,max}", 40, "m/s", "Stall speed")
# -
# ### declare free variables
# + slideshow={"slide_type": "subslide"}
# Free (optimized) variables.  Vec(3, ...) variables carry one value per
# flight condition -- 0: outbound, 1: return, 2: sprint -- while
# Vec(2, ...) covers only the two fuel-burning cruise segments.
V = Vec(3, "V", "m/s", "Flight speed")
C_L = Vec(3, "C_L", "-", "Wing lift coefficent")
C_D = Vec(3, "C_D", "-", "Wing drag coefficent")
C_Dfuse = Vec(3, "C_{D_{fuse}}", "-", "Fuselage drag coefficent")
C_Dp = Vec(3, "C_{D_p}", "-", "drag model parameter")
C_Di = Vec(3, "C_{D_i}", "-", "drag model parameter")
T = Vec(3, "T", "N", "Thrust force")
Re = Vec(3, "Re", "-", "Reynold's number")
W = Vec(3, "W", "N", "Aircraft weight")
eta_i = Vec(3, "\\eta_i", "-", "Aircraft efficiency")
eta_prop = Vec(3, "\\eta_{prop}", "-")
eta_0 = Vec(3, "\\eta_0", "-")
W_fuel = Vec(2, "W_{fuel}", "N", "Fuel weight")
z_bre = Vec(2, "z_{bre}", "-")
S = Var("S", "m^2", "Wing area")
R = Var("R", "m", "Airplane range")
A = Var("A", "-", "Aspect Ratio")
I_cap = Var("I_{cap}", "m^4", "Spar cap area moment of inertia per unit chord")
M_rbar = Var("\\bar{M}_r", "-")
P_max = Var("P_{max}", "W")
V_stall = Var("V_{stall}", "m/s")
nu = Var("\\nu", "-")
p = Var("p", "-")
q = Var("q", "-")
tau = Var("\\tau", "-")
t_cap = Var("t_{cap}", "-")
t_web = Var("t_{web}", "-")
W_cap = Var("W_{cap}", "N")
W_zfw = Var("W_{zfw}", "N", "Zero fuel weight")
W_eng = Var("W_{eng}", "N")
W_mto = Var("W_{mto}", "N", "Maximum takeoff weight")
W_pay = Var("W_{pay}", "N")  # NOTE: shadows the fixed W_pay constant declared in the constants cell
W_tw = Var("W_{tw}", "N")
W_web = Var("W_{web}", "N")
W_wing = Var("W_{wing}", "N")
# -
# Check the vector constraints:
W == 0.5*rho*C_L*S*V**2  # display-only check of the elementwise lift = weight constraint; result is not stored
# ### Form the optimization problem
#
# In the 3-element vector variables, indices 0, 1, and 2 are the outbound, return and sprint flights.
# + slideshow={"slide_type": "subslide"}
# Constraint groups for the geometric program.  Each tuple is a set of
# posynomial (in)equalities; vector variables apply elementwise across the
# three flight conditions (0: outbound, 1: return, 2: sprint).

# Lift balances weight, thrust covers drag, Reynolds number definition.
steady_level_flight = (W == 0.5*rho*C_L*S*V**2,
                       T >= 0.5*rho*C_D*S*V**2,
                       Re == (rho/mu)*V*(S/A)**0.5)

# Stall at max takeoff weight, sea-level density, flaps down.
landing_fc = (W_mto <= 0.5*rho_sl*V_stall**2*C_Lmax*S,
              V_stall <= V_stallmax)

# The sprint condition (index 2) sizes the installed power.
sprint_fc = (P_max >= T[2]*V[2]/eta_0[2],
             V[2] >= V_sprint_reqt)

# Total drag = fuselage parasite + profile + induced; the second constraint
# is a GP-compatible posynomial fit of a 2D airfoil drag polar relating
# C_Dp to C_L, Re and thickness ratio tau.
drag_model = (C_D >= (0.05/S)*gpkit.units.m**2 +C_Dp + C_L**2/(pi*e*A),
              1 >= (2.56*C_L**5.88/(Re**1.54*tau**3.32*C_Dp**2.62) +
                    3.8e-9*tau**6.23/(C_L**0.92*Re**1.38*C_Dp**9.57) +
                    2.2e-3*Re**0.14*tau**0.033/(C_L**0.01*C_Dp**0.73) +
                    1.19e4*C_L**9.78*tau**1.76/(Re*C_Dp**0.91) +
                    6.14e-6*C_L**6.53/(Re**0.99*tau**0.52*C_Dp**5.19)))

# Overall efficiency = engine x propeller; the last line is the
# actuator-disk relation for the propeller's induced efficiency.
propulsive_efficiency = (eta_0 <= eta_eng*eta_prop,
                         eta_prop <= eta_i*eta_v,
                         4*eta_i + T*eta_i**2/(0.5*rho*V**2*A_prop) <= 4)

# 4th order taylor approximation for e^x
# (GP-compatible stand-in for the exponential in the Breguet range equation)
z_bre_sum = 0
for i in range(1,5):
    z_bre_sum += z_bre**i/np.math.factorial(i)

# Breguet range: fuel fraction bounds the Taylor expansion of e^z - 1,
# applied to the two cruise segments ([:2]).
range_constraints = (R >= R_min,
                     z_bre >= g*R*T[:2]/(h_fuel*eta_0[:2]*W[:2]),
                     W_fuel/W[:2] >= z_bre_sum)

punits = gpkit.units.parse_expression('N/W^0.8083')
# Weight buildup; engine weight is a power-law fit of installed power.
weight_relations = (W_pay >= 500*g*gpkit.units.kg,
                    W_tw >= W_fixed + W_pay + W_eng,
                    W_zfw >= W_tw + W_wing,
                    W_eng >= 0.0372*P_max**0.8083 * punits,
                    W_wing/f_wadd >= W_cap + W_web,
                    W[0] >= W_zfw + W_fuel[1],
                    W[1] >= W_zfw,
                    W_mto >= W[0] + W_fuel[0],
                    W[2] == W[0])

wunits = gpkit.units.m**-4
munits = gpkit.units.parse_expression('Pa*m**6')
# Beam model of the wing box: spar caps sized by bending stress, the shear
# web by shear stress; nu relates planform taper (p) to structural volume.
wing_structural_model = (2*q >= 1 + p,
                         p >= 2.2,
                         tau <= 0.25,
                         M_rbar >= W_tw*A*p/(24*gpkit.units.N),
                         .92**2/2*w*tau**2*t_cap >= I_cap * wunits + .92*w*tau*t_cap**2,
                         8 >= N_lift*M_rbar*A*q**2*tau/S/I_cap/sigma_max * munits,
                         12 >= A*W_tw*N_lift*q**2/tau/S/t_web/sigma_maxshear,
                         nu**3.94 >= .86*p**-2.38 + .14*p**0.56,
                         W_cap >= 8*rho_alum*g*w*t_cap*S**1.5*nu/3/A**.5,
                         W_web >= 8*rho_alum*g*r_h*tau*t_web*S**1.5*nu/3/A**.5
                         )
# + slideshow={"slide_type": "subslide"}
# Collect every constraint group and minimize total fuel burn
# (outbound + return segments) subject to them.
eqns = (weight_relations + range_constraints + propulsive_efficiency
        + drag_model + steady_level_flight + landing_fc + sprint_fc + wing_structural_model)
m = gpkit.Model(W_fuel.sum(), eqns)
# -
# ### Design an airplane
m.interact()  # solve and launch GPkit's interactive solution widget
# The "local model" is the power-law tangent to the Pareto frontier, gleaned from sensitivities.
m.solution["localmodel"]
# ### plot design frontiers
from gpkit.interactive.plotting import sensitivity_plot
_ = sensitivity_plot(m)  # chart of objective sensitivity to each fixed constant
# ### Interactive analysis
#
# Let's investigate it with the [cadtoons](https://github.com/bqpd/cadtoons) library. Running `cadtoon.py flightconditions.svg` in this folder creates an interactive SVG graphic for us.
#
# First, import the functions to display HTML in iPython Notebook, and the [ractivejs](http://www.ractivejs.org/) library.
# +
from string import Template
# JavaScript run inside the cadtoon SVG; the $-placeholders are filled in
# by fuelupdate_py() with values taken from a GPkit solution.
fuelupdate_js = Template("""
var W_eng = $W_eng,
lam = $lam
fuel.shearinner.scalex = 1-$tcap*10
fuel.shearinner.scaley = 1-$tweb*100
fuel.airfoil.scaley = $tau/0.13
fuel.fuse.scalex = $W_fus/24000
fuel.wing.scalex = $b/2/14
fuel.wing.scaley = $cr*1.21
""")
def fuelupdate_py(sol):
    """Render the cadtoon-update JS for a GPkit solution.

    Pulls the relevant design variables out of *sol*, derives the wing
    taper ratio, span and root chord, and substitutes them into the
    fuelupdate_js template.
    """
    names = ("p", "S", "A", "t_{cap}", "t_{web}", "w",
             "\\tau", "W_{eng}", "W_{mto}", "W_{wing}")
    p, S, A, t_cap, t_web, w, tau, W_eng, W_mto, W_wing = sol.getvars(*names)
    # Taper ratio from the structural parameter p = 1 + 2*lambda.
    taper = 0.5*(p-1)
    span = (S*A)**0.5
    root_chord = 2/(1+taper)*(S/A)**0.5
    return fuelupdate_js.substitute(
        lam=taper,
        b=span,
        cr=root_chord,
        tcap=t_cap/tau,
        tweb=t_web/w,
        tau=tau,
        W_eng=W_eng,
        W_fus=W_mto - W_wing - W_eng,
    )
# Static JS (no substitutions) appended after each update to keep the
# engine and wing shapes consistent with W_eng and the taper ratio lam.
fuelconstraint_js = """
fuel.engine1.scale = Math.pow(W_eng/3000, 2/3)
fuel.engine2.scale = Math.pow(W_eng/3000, 2/3)
fuel.engine1.y = 6*lam
fuel.engine2.y = 6*lam
fuel.wingrect.scaley = 1-lam
fuel.wingrect.y = -6 + 5*lam
fuel.wingtaper.scaley = lam
fuel.wingtaper.y = 5*lam
"""
# -
# Render the "fuel" cadtoon SVG alongside the notebook cells.
gpkit.interactive.showcadtoon("fuel",
                              "position:absolute; height:0; right:0; top:24em;")
# Live re-solve widget: each key maps a constant to a (min, max, step) slider.
gpkit.interactive.ractorpy(m, fuelupdate_py,
                           {"V_{stall,max}": (20, 50, 0.3),
                            "R_{min}": (1e6, 1e7, 1e5),
                            "V_{sprintreqt}": (100, 200, 1)},
                           fuelconstraint_js)
# Pre-solved JS-only version (coarser slider steps, works without a kernel).
gpkit.interactive.ractorjs("fuel", m, fuelupdate_py,
                           {"V_{stall,max}": (20, 50, 3),
                            "R_{min}": (1e6, 1e7, 2e6),
                            "V_{sprintreqt}": (100, 200, 20)},
                           fuelconstraint_js)
# This concludes the aircraft example. Try playing around with the sliders up above until you're bored; then check out one of the other examples. Thanks for reading!
# ### Import CSS for nbviewer
#
# If you have a local iPython stylesheet installed, this will add it to the iPython Notebook:
from IPython import utils
from IPython.core.display import HTML
import os
def css_styling():
    """Load the default custom.css file from the user's IPython profile.

    Returns:
        HTML: a ``<style>`` tag wrapping the stylesheet contents, suitable
        for display in the notebook.

    Raises:
        OSError: if the profile has no custom.css file.
    """
    base = utils.path.get_ipython_dir()
    csspath = os.path.join(base, 'profile_default/static/custom/custom.css')
    # Use a context manager so the file handle is closed promptly; the
    # original left it open until garbage collection.
    with open(csspath, 'r') as f:
        styles = "<style>\n%s\n</style>" % f.read()
    return HTML(styles)
css_styling()
| docs/source/ipynb/Fuel/Fuel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: U4-S1-NLP-DS6 (Python3)
# language: python
# name: u4-s1-nlp-ds6
# ---
# + [markdown] toc-hr-collapsed=false
# Lambda School Data Science
#
# *Unit 4, Sprint 1, Module 1*
#
# ---
# <h1 id="moduleTitle"> Natural Language Processing Introduction (Prepare)</h1>
#
# "Natural" meaning - not computer languages but spoken/written human languages. The hard thing about NLP is that human languages are far less structured or consistent than computer languages. This is perhaps the largest source of difficulty when trying to get computers to "understand" human languages. How do you get a machine to understand sarcasm, and irony, and synonyms, connotation, denotation, nuance, and tone of voice --all without it having lived a lifetime of experience for context? If you think about it, our human brains have been exposed to quite a lot of training data to help us interpret languages, and even then we misunderstand each other pretty frequently.
#
#
# <h2 id='moduleObjectives'>Learning Objectives</h2>
#
# By the end of end of this module, a student should be able to:
# * <a href="#p1">Objective 1</a>: Tokenze text
# * <a href="#p1">Objective 2</a>: Remove stop words from text
# * <a href="#p3">Objective 3</a>: Perform stemming and lemmatization on tokens
#
# ## Conda Environments
#
# You will be completing each module this sprint on your machine. We will be using conda environments to manage the packages and their dependencies for this sprint's content. In a classroom setting, instructors typically abstract away environment for you. However, environment management is an important professional data science skill. We showed you how to manage environments using pipvirtual env during Unit 3, but in this sprint, we will introduce an environment management tool common in the data science community:
#
# > __conda__: Package, dependency and environment management for any language—Python, R, Ruby, Lua, Scala, Java, JavaScript, C/ C++, FORTRAN, and more.
#
# The easiest way to install conda on your machine is via the [Anaconda Distribution](https://www.anaconda.com/distribution/) of Python & R. Once you have conda installed, read ["A Guide to Conda Environments"](https://towardsdatascience.com/a-guide-to-conda-environments-bc6180fc533). This article will provide an introduce into some of the conda basics. If you need some additional help getting started, the official ["Setting started with conda"](https://conda.io/projects/conda/en/latest/user-guide/getting-started.html) guide will point you in the right direction.
#
# :snake:
#
# To get the sprint environment setup:
#
# 1. Open your command line tool (Terminal for MacOS, Anaconda Prompt for Windows)
# 2. Navigate to the folder with this sprint's content. There should be a `requirements.txt`
# 3. Run `conda create -n U4-S1-NLP python==3.7` => You can also rename the environment if you would like. Once the command completes, your conda environment should be ready.
# 4. Now, we are going to add in the require python packages for this sprint. You will need to 'activate' the conda environment: `source activate U4-S1-NLP` on Terminal or `conda activate U4-S1-NLP` on Anaconda Prompt. Once your environment is activate, run `pip install -r requirements.txt` which will install the required packages into your environment.
# 5. We are going to also add an Ipython Kernel reference to your conda environment, so we can use it from JupyterLab.
# 6. Next run `python -m ipykernel install --user --name U4-S1-NLP --display-name "U4-S1-NLP (Python3)"` => This will add a json object to an ipython file, so JupterLab will know that it can use this isolated instance of Python. :)
# 7. Last step, we need to install the models for Spacy. Run these commands `python -m spacy download en_core_web_md` and `python -m spacy download en_core_web_lg`
# 8. Deactivate your conda environment and launch JupyterLab. You should know see "U4-S1-NLP (Python3)" in the list of available kernels on launch screen.
#
# + [markdown] toc-hr-collapsed=false
# # Tokenize Text (Learn)
# <a id="p1"></a>
# + [markdown] toc-hr-collapsed=true
# ## Overview
#
# > **token**: an instance of a sequence of characters in some particular document that are grouped together as a useful semantic unit for processing
#
# > [_*Introduction to Information Retrieval*_](https://nlp.stanford.edu/IR-book/)
#
#
# ### The attributes of good tokens
#
# * Should be stored in an iterable datastructure
# - Allows analysis of the "semantic unit"
# * Should be all the same case
# - Reduces the complexity of our data
# * Should be free of non-alphanumeric characters (ie punctuation, whitespace)
# - Removes information that is probably not relevant to the analysis
# -
# Let's pretend we are trying analyze the random sequence here. Question: what is the most common character in this sequence?
random_seq = "AABAAFBBBBCGCDDEEEFCFFDFFAFFZFGGGGHEAFJAAZBBFCZ"
# A useful unit of analysis for us is going to be a letter or character
tokens = list(random_seq)
print(tokens)
# Our tokens are already "good": in an iterable datastructure, all the same case, and free of noise characters (punctionation, whitespace), so we can jump straight into analysis.
# +
import seaborn as sns
sns.countplot(tokens);
# -
# The most common character in our sequence is "F". We can't just glance at the the sequence to know which character is the most common. We (humans) struggle to subitize complex data (like random text sequences).
#
# > __Subitize__ is the ability to tell the number of objects in a set, quickly, without counting.
#
# We need to chunk the data into countable pieces "tokens" for us to analyze them. This inability subitize text data is the motivation for our discussion today.
# + [markdown] toc-hr-collapsed=true
# ### Tokenizing with Pure Python
# -
sample = "Friends, Romans, countrymen, lend me your ears;"
# ##### Iterable Tokens
#
# A string object in Python is already iterable. However, the item you iterate over is a character not a token:
#
# ```
# from time import sleep
# for num, character in enumerate(sample):
# sleep(.5)
# print(f"Char {num} - {character}", end="\r")
# ```
#
# If we instead care about the words in our sample (our semantic unit), we can use the string method `.split()` to seperate the whitespace and create iterable units. :)
sample.split(" ")
# ##### Case Normalization
# A common data cleaning task with tokens is to standardize or normalize the case. Normalizing case reduces the chance that you have duplicate records for things which have practically the same semantic meaning. You can use either the `.lower()` or `.upper()` string methods to normalize case.
#
# Consider the following example:
import pandas as pd
df = pd.read_csv('./data/Datafiniti_Amazon_Consumer_Reviews_of_Amazon_Products_May19.csv')
# Notice anything odd here?
df['brand'].value_counts()
# Much cleaner
df['brand'] = df['brand'].apply(lambda x: x.lower())
df['brand'].value_counts()
# ##### Keep Only Alphanumeric Characters
# Yes, we only want letters and numbers. Everything else is probably noise: punctuation, whitespace, and other notation. This one is a little bit more complicated than our previous example. Here we will have to import the base package `re` (regular expressions).
#
# The only regex expression pattern you need for this is `'[^a-zA-Z ^0-9]'` which keeps lower case letters, upper case letters, spaces, and numbers.
sample = sample+" 911"
print(sample)
# +
import re
re.sub(r'[^a-zA-Z ^0-9]', '', sample)
# -
# #### Two Minute Challenge
# - Complete the function `tokenize` below
# - Combine the methods which we discussed above to clean text before we analyze it
# - You can put the methods in any order you want
def tokenize(text):
    """Parse a string into a list of semantic units (words).

    Args:
        text (str): The string that the function will tokenize.

    Returns:
        list: lowercase word tokens, with characters outside the kept
        class (letters, digits, spaces, '^') stripped before splitting.
    """
    cleaned = re.sub(r'[^a-zA-Z ^0-9]', '', text)
    return cleaned.lower().split()
tokenize(sample)
# + [markdown] toc-hr-collapsed=true
# ## Follow Along
#
# Our inability to analyze text data becomes quickly amplified in a business context. Consider the following:
#
# A business which sells widgets also collects customer reviews of those widgets. When the business first started out, they had a human read the reviews to look for patterns. Now, the business sells thousands of widgets a month. The human readers can't keep up with the pace of reviews to synthesize an accurate analysis. They need some science to help them analyze their data.
#
# Now, let's pretend that business is Amazon, and the widgets are Amazon products such as the Alexa, Echo, or other AmazonBasics products. Let's analyze their reviews with some counts. This dataset is available on [Kaggle](https://www.kaggle.com/datafiniti/consumer-reviews-of-amazon-products/).
# +
"""
Import Statements
"""
# Base
from collections import Counter
import re
import pandas as pd
# Plotting
import squarify
import matplotlib.pyplot as plt
import seaborn as sns
# NLP Libraries
import spacy
from spacy.tokenizer import Tokenizer
from nltk.stem import PorterStemmer
nlp = spacy.load("en_core_web_lg")
# -
df.head(2)
# How can we count the raw text?
df['reviews.text'].value_counts(normalize=True)[:50]
df['tokens'] = df['reviews.text'].apply(tokenize)
df['tokens'].head()
# #### Analyzing Tokens
# +
# Object from Base Python
from collections import Counter
# The object `Counter` takes an iterable, but you can instaniate an empty one and update it.
word_counts = Counter()
# Update it based on a split of each of our documents
df['tokens'].apply(lambda x: word_counts.update(x))
# Print out the 10 most common words
word_counts.most_common(10)
# -
# Let's create a fuction which takes a corpus of document and returns and dataframe of word counts for us to analyze.
def count(docs):
    """Tabulate word statistics over a corpus of tokenized documents.

    Args:
        docs: iterable of token lists, one list per document.

    Returns:
        pd.DataFrame: one row per unique word, sorted by frequency rank,
        with columns word, appears_in, count, rank, pct_total,
        cul_pct_total, and appears_in_pct (share of documents that
        contain the word at least once).
    """
    totals = Counter()
    doc_freq = Counter()
    n_docs = len(docs)
    for doc in docs:
        totals.update(doc)
        doc_freq.update(set(doc))  # count each word once per document

    wc = pd.DataFrame(list(totals.items()), columns=['word', 'count'])
    wc['rank'] = wc['count'].rank(method='first', ascending=False)
    wc['pct_total'] = wc['count'] / wc['count'].sum()
    wc = wc.sort_values(by='rank')
    wc['cul_pct_total'] = wc['pct_total'].cumsum()

    ac = pd.DataFrame(list(doc_freq.items()), columns=['word', 'appears_in'])
    wc = ac.merge(wc, on='word')
    wc['appears_in_pct'] = wc['appears_in'] / n_docs
    return wc.sort_values(by='rank')
# Use the Function
wc = count(df['tokens'])
wc.head()
# +
import seaborn as sns
# Cumulative Distribution Plot
sns.lineplot(x='rank', y='cul_pct_total', data=wc);
# -
wc[wc['rank'] <= 20]['cul_pct_total'].max()
# +
import squarify
import matplotlib.pyplot as plt
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8 )
plt.axis('off')
plt.show()
# -
# ### Processing Raw Text with Spacy
#
# Spacy's datamodel for documents is unique among NLP libraries. Instead of storing the documents components repeatively in various datastructures, Spacy indexes components and simply stores the lookup informaiton.
#
# This is often why Spacy is considered to be more production grade than library like NLTK.
# +
import spacy
from spacy.tokenizer import Tokenizer
nlp = spacy.load("en_core_web_lg")
# Tokenizer
tokenizer = Tokenizer(nlp.vocab)
# -
# Print out list of tokens
[token.text for token in tokenizer(sample)]
# +
# Tokenizer Pipe
tokens = []
""" Make them tokens """
for doc in tokenizer.pipe(df['reviews.text'], batch_size=500):
doc_tokens = [token.text for token in doc]
tokens.append(doc_tokens)
df['tokens'] = tokens
# -
df['tokens'].head()
wc = count(df['tokens'])
wc.head()
# +
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8 )
plt.axis('off')
plt.show()
# + [markdown] toc-hr-collapsed=true
# ## Challenge
#
# In the module project, you will apply tokenization to another set of review data and produce visualizations of those tokens.
# + [markdown] toc-hr-collapsed=false
# # Stop Words (Learn)
# <a id="p2"></a>
# -
# ## Overview
# Section Agenda
# - What are they?
# - How do we get rid of them using Spacy?
# - Visualization
# - Libraries of Stop Words
# - Extending Stop Words
# - Statistical trimming
#
# In the visualizations above, you may have begun to notice a pattern. Most of the words don't really add much to our understanding of product reviews. Words such as "I", "and", "of", etc. have almost no semantic meaning to us. We call these useless words "stop words," because we should 'stop' ourselves from including them in the analysis.
#
# Most NLP libraries have built in lists of stop words that common english words: conjunctions, articles, adverbs, pronouns, and common verbs. The best practice, however, is to extend/customize these standard english stopwords for your problem's domain. If I am studying political science, I may want to exclude the word "politics" from my analysis; it's so common it does not add to my understanding.
# + [markdown] toc-hr-collapsed=true
# ## Follow Along
#
# ### Default Stop Words
# Let's take a look at the standard stop words that came with our spacy model:
# -
# Spacy's Default Stop Words
nlp.Defaults.stop_words
# +
tokens = []
""" Update those tokens w/o stopwords"""
for doc in tokenizer.pipe(df['reviews.text'], batch_size=500):
doc_tokens = []
for token in doc:
if (token.is_stop == False) & (token.is_punct == False):
doc_tokens.append(token.text.lower())
tokens.append(doc_tokens)
df['tokens'] = tokens
# -
df.tokens.head()
# +
wc = count(df['tokens'])
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8 )
plt.axis('off')
plt.show()
# -
# ### Extending Stop Words
print(type(nlp.Defaults.stop_words))
STOP_WORDS = nlp.Defaults.stop_words.union(['batteries','I', 'amazon', 'i', 'Amazon', 'it', "it's", 'it.', 'the', 'this',])
STOP_WORDS
# +
tokens = []
for doc in tokenizer.pipe(df['reviews.text'], batch_size=500):
doc_tokens = []
for token in doc:
if token.text.lower() not in STOP_WORDS:
doc_tokens.append(token.text.lower())
tokens.append(doc_tokens)
df['tokens'] = tokens
# -
wc = count(df['tokens'])
wc.head()
# +
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8 )
plt.axis('off')
plt.show()
# -
df['reviews.rating'].value_counts()
# ### Statistical Trimming
#
# So far, we have talked about stop word in relation to either broad english words or domain specific stop words. Another common approach to stop word removal is via statistical trimming. The basic idea: preserve the words that give the most about of variation in your data.
#
# Do you remember this graph?
sns.lineplot(x='rank', y='cul_pct_total', data=wc);
# This graph tells us that only a *handful* of words represented 80% of words in the overall corpus. We can interpret this in two ways:
# 1. The words that appear most frequently may not provide any insight into the meaning of the documents since they are so prevalent.
# 2. Words that appear infrequently (at the end of the graph) also probably do not add much value, because they are mentioned so rarely.
#
# Let's take a look at the words at the bottom and the top and make a decision for ourselves:
wc.tail(20)
wc['appears_in_pct'].describe()
# Frequency of appears in documents
sns.distplot(wc['appears_in_pct']);
# +
# Tree-Map w/ Words that appear in a least 2.5% of documents.
wc = wc[wc['appears_in_pct'] >= 0.025]
sns.distplot(wc['appears_in_pct']);
# -
# ## Challenge
#
# In the module project, you will apply stop word removal to a new corpus. You will focus on applying dictionary based stop word removal, but as a stretch goal, you should consider applying statistical stopword trimming.
# + [markdown] toc-hr-collapsed=false
# # Stemming & Lemmatization (Learn)
# <a id="p3"></a>
# + [markdown] toc-hr-collapsed=false
# ## Overview
#
# You can see from our example above there is still some normalization to do to get a clean analysis. You notice that there many words (*i.e.* 'batteries', 'battery') which share the same root word. We can use either the process of stemming or lemmatization to trim our words down to the 'root' word.
#
# __Section Agenda__:
#
# - Which is which
# - why use one v. other
# - show side by side visualizations
# - how to do it in spacy & nltk
# - introduce PoS in here as well
# + [markdown] toc-hr-collapsed=true
# ## Follow Along
# + [markdown] toc-hr-collapsed=true
# ### Stemming
#
# > *a process for removing the commoner morphological and inflexional endings from words in English. Its main use is as part of a term normalisation process that is usually done when setting up Information Retrieval systems.* - [Martin Porter](https://tartarus.org/martin/PorterStemmer/)
#
# Some examples include:
# - 'ing'
# - 'ed'
# - 's'
#
# These rules are by no means comprehensive, but they are somewhere to start. Most stemming is done by well documented algorithms such as Porter, Snowball, and Dawson. Porter and its newer version Snowball are the most popular stemming algorithms today. For more information on various stemming algorithms check out [*"A Comparative Study of Stemming Algorithms"*](https://pdfs.semanticscholar.org/1c0c/0fa35d4ff8a2f925eb955e48d655494bd167.pdf)
#
#
# Spacy does not do stemming out of the box, but instead uses a different technique called *lemmatization* which we will discuss in the next section. Let's turn to an antique python package `nltk` for stemming.
# +
from nltk.stem import PorterStemmer
ps = PorterStemmer()
words = ["python", "pythoner", "pythoning", "pythoned", "pythonly"]
for word in words:
print(ps.stem(word))
# -
# ### Two Minute Challenge
#
# Apply the Porter stemming algorithm to the tokens in the `df` dataframe. Visualize the results in the tree graph we have been using for this session.
# Put in a new column `stems`
# +
wc = count(df['stems'])
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8 )
plt.axis('off')
plt.show()
# + [markdown] toc-hr-collapsed=false
# ### Lemmatization
#
# You notice immediately that results are kinda funky - words just oddly chopped off. The Porter algorithm did exactly what it knows to do: chop off endings. Stemming works well in applications where humans don't have to worry about reading the results. Search engines and more broadly information retrival algorithms use stemming. Why? Becuase it's fast.
#
# Lemmatization on the other hand is more methodical. The goal is to transform a word into's base form called a lemma. Plural nouns with funky spellings get transformed to singular tense. Verbs are all transformed to the transitive. Nice tidy data for a visualization. :) However, this tidy data can come at computational cost. Spacy does a pretty freaking good job of it though. Let's take a look:
# +
sent = "This is the start of our NLP adventure. We started here with Spacy."
nlp = spacy.load("en_core_web_lg")
doc = nlp(sent)
# Lemma Attributes
for token in doc:
print(token.text, " ", token.lemma_)
# -
# Wrap it all in a function
def get_lemmas(text):
    """Lemmatize a raw text string with the module-level spacy model ``nlp``.

    Args:
        text (str): raw document text.

    Returns:
        list: the lemma of every token that is not a stop word, not
        punctuation, and not a pronoun.
    """
    doc = nlp(text)
    return [token.lemma_
            for token in doc
            if not token.is_stop and not token.is_punct and token.pos_ != 'PRON']
df['lemmas'] = df['reviews.text'].apply(get_lemmas)
df['lemmas'].head()
# +
wc = count(df['lemmas'])
wc_top20 = wc[wc['rank'] <= 20]
squarify.plot(sizes=wc_top20['pct_total'], label=wc_top20['word'], alpha=.8 )
plt.axis('off')
plt.show()
# -
# ## Challenge
#
# You should know how to apply lemmatization with Spacy to a corpus of text.
# # Review
#
# In this module project, you've seen us apply Natural Language Processing techniques (tokenization, stopword removal, and lemmatization) to a corpus of Amazon text reviews. We analyzed those reviews using these techniques and discovered that Amazon customers are generally statisfied with the battery life of Amazon products and generally appear statisfied.
#
# You will apply similiar techniques to today's [module project assignment](LS_DS_411_Text_Data_Assignment.ipynb) to analyze coffee shop reviews from yelp. Remeber that the techniques of processing the text are just the begining. There are many ways to slice and dice the data.
# # Sources
#
# * Spacy 101 - https://course.spacy.io
# * NLTK Book - https://www.nltk.org/book/
# * An Introduction to Information Retrieval - https://nlp.stanford.edu/IR-book/pdf/irbookonlinereading.pdf
# + [markdown] toc-hr-collapsed=true
# ## Advanced Resources & Techniques
# - Named Entity Recognition (NER)
# - Dependcy Trees
# - Generators
# - the major libraries (NLTK, Spacy, Gensim)
| curriculum/unit-4-machine-learning/sprint-1-nlp/module1-text-data/LS_DS_411_Text Data_Lecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 50)
df = pd.read_csv('/kaggle/input/lending-club-dataset/lending_club_loan_two.csv')
df.head()
# ## Variable Description
# **loan_amnt** >> The amount of the loan applied for by the borrower.
#
# **term** >> The number of payments on the loan. Values are in months and can be either 36 or 60.
#
# **int_rate** >> Interest rate.
#
# **installment** >> The monthly payment for the loan.
#
# **grade** >> LC assigned loan grade
#
# **emp_title** >> Job title of the borrower
#
# **emp_length** >> Employment lenghth in years
#
# **verification_status** >> Indicates whether thte income source has been verified
#
# **title** >> Loan title provided by the borrower
#
# **annual_inc** >> The self-reported annual income provided by the borrower during registration.
#
# **loan_status** >> Current status of the loan.
#
# **zip_code** >> The first 3 numbers of the zip code provided by the borrower in the loan application.
#
# **addr_state** >> The state provided by the borrower in the loan application
#
# **dti** >> A ratio calculated using the borrower’s total monthly debt payments on the total debt obligations, excluding mortgage and the requested LC loan, divided by the borrower’s self-reported monthly income.
#
# **earliest_cr_line** >> The year the borrower's earliest reported credit line was opened
#
# **open_acc** >> The number of open credit lines in the borrower's credit file.
#
# **pub_rec** >> Number of derogatory public records
#
# **revol_bal** >> Total credit revolving balance
#
# **revol_util** >> Revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit.
#
# **total_acc** >> The total number of credit lines currently in the borrower's credit file
#
# **mort_acc** >> Presence of mortgage accounts-- (1-present, 0-not present)
#
# **pub_rec_bankruptcies** >> Presence of public record bankruptcies-- (1-present, 0-not present)
#
# **home_ownership** >> The home ownership status provided by the borrower during registration or obtained from the credit report. Our values are: RENT, OWN, MORTGAGE, OTHER
#
# **sub_grade** >> LC assigned loan subgrade
#
# **purpose** >> A category provided by the borrower for the loan request.
#
# **application_type** >> Indicates whether the loan is an individual application or a joint application with two co-borrowers
#
df.info()
df.describe()
# ## Missing Values
# +
# Check for missing values
def missing_data(frame):
    """Summarize missing values per column of ``frame``.

    Args:
        frame (pd.DataFrame): data to inspect.

    Returns:
        pd.DataFrame: columns ``Total`` (null count) and ``Percent``
        (null share on a 0-100 scale), sorted most-missing first.
    """
    nulls = frame.isnull()
    total = nulls.sum().sort_values(ascending=False)
    percent = (nulls.sum() / nulls.count() * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data(df)
# -
# ## Target Variable
# Percentage for each loan status type
df['loan_status'].value_counts(normalize=True)
# Visualizing the target variable
import seaborn as sns
sns.countplot(x=df['loan_status'])
# ## Features
# Display the number of unique values for each feature
features = df.columns.values
unique = pd.DataFrame(df[features].nunique().sort_values(), columns = ['UniqueValCounts'])
unique
cols = ['term', 'int_rate', 'installment', 'annual_inc', 'dti', 'earliest_cr_line', 'sub_grade',
'open_acc', 'pub_rec', 'revol_bal', 'revol_util', 'total_acc', 'mort_acc', 'pub_rec_bankruptcies']
# Show Unique Value of Columns that have <100 unique entries
for col in cols:
print("column : " + col)
print(df[col].unique())
print("")
# +
# Plot the distribution of the numerical values
import matplotlib.pyplot as plt
dist_cols = ['int_rate','installment', 'annual_inc', 'dti', 'open_acc', 'total_acc']
fig, ax = plt.subplots(3, 2, figsize=(10,10))
sns.set()
i = 1
for col in dist_cols:
ax = plt.subplot(3, 2, i, label='small')
sns.distplot(df[col])
ax.set_title(col, fontsize=12)
plt.xticks(rotation=45)
i += 1
fig.tight_layout()
plt.show()
# -
# Check correlations between numerical vars
plt.figure(figsize=(12, 8))
sns.heatmap(df.corr(), annot=True)
# +
# Plot the distribution of the categorical values
count_cols = ['term','mort_acc', 'pub_rec_bankruptcies', 'pub_rec', 'home_ownership', 'verification_status','purpose',
'grade', 'sub_grade', 'application_type']
fig, ax = plt.subplots(5, 2, figsize=(20,25))
sns.set()
i = 1
for col in count_cols:
ax = plt.subplot(5, 2, i, label='small')
sns.countplot(x=col, data=df, hue='loan_status')
ax.set_title(col, fontsize=12)
plt.xticks(rotation=90)
i += 1
fig.tight_layout()
plt.show()
# -
# Check the job titles of the borrowers
df.emp_title.unique()
# +
# Transform the emp_title variable and prepare for visualization
df.emp_title = df.emp_title.str.lower()
def _keyword_collapser(keyword):
    """Build a normalizer that collapses any title containing *keyword*.

    The returned function mirrors the original per-keyword helpers:
    for a string input it returns ``keyword`` when the keyword occurs
    anywhere in the title, otherwise the title unchanged; non-string
    input (e.g. NaN) falls through and yields None, which pandas also
    treats as missing.
    """
    def normalize(title):
        if type(title) is str:
            return keyword if keyword in title else title
    return normalize

# One normalizer per common job-title keyword (replaces six
# copy-pasted functions with a single factory).
manager = _keyword_collapser('manager')
president = _keyword_collapser('president')
nurse = _keyword_collapser('nurse')
driver = _keyword_collapser('driver')
assistant = _keyword_collapser('assistant')
engineer = _keyword_collapser('engineer')
functions = [manager, president, nurse, driver, assistant, engineer]
for func in functions:
df['emp_title'] = df.emp_title.apply(func)
# -
df['emp_title'].value_counts()
# Plot the 10 most common job titles of borrowers
plt.barh(df.emp_title.value_counts()[:10].index, df.emp_title.value_counts()[:10])
# Check the employment lengths of the borrowers
df.emp_length.unique()
# +
# Transform the emp_length variable and prepare for visualization
order = ['< 1 year', '1 year', '2 years', '3 years', '4 years', '5 years',
'6 years', '7 years', '8 years', '9 years', '10+ years',]
plot = sns.countplot(x='emp_length', data=df, hue='loan_status', order=order)
plot.set_xticklabels(labels=order,rotation=45)
# -
# ## Data Pre-Processing
# Transform the target variable to numeric
df['loan_status'] = df.loan_status.map({'Fully Paid':0, 'Charged Off':1})
# +
# Transform pub_rec, pub_rec, pub_rec_bankruptcies to handle the outliers
def pub_rec(number):
    """Collapse the derogatory-public-records count to a 0/1 flag.

    Zero maps to 0; every other value -- including NaN, which fails
    the equality test -- maps to 1, matching the original branching.
    """
    return 0 if number == 0.0 else 1

def mort_acc(number):
    """Collapse the mortgage-accounts count to a 0/1 flag, keeping NaN."""
    if number == 0.0:
        return 0
    if number >= 1.0:
        return 1
    return number  # NaN (both comparisons are False) passes through unchanged

def pub_rec_bankruptcies(number):
    """Collapse the bankruptcies count to a 0/1 flag, keeping NaN."""
    if number == 0.0:
        return 0
    if number >= 1.0:
        return 1
    return number  # NaN passes through unchanged
df['pub_rec'] = df.pub_rec.apply(pub_rec)
df['mort_acc'] = df.mort_acc.apply(mort_acc)
df['pub_rec_bankruptcies'] = df.pub_rec_bankruptcies.apply(pub_rec_bankruptcies)
# -
# Drop job titles since they are too many to be one hot encoded
df.drop('emp_title', axis=1, inplace=True)
df.title.unique()
df.purpose.unique()
df.home_ownership.value_counts()
# Merge NONE and ANY with OTHER
df.loc[(df.home_ownership == 'ANY') | (df.home_ownership == 'NONE'), 'home_ownership'] = 'OTHER'
# Drop title and emp_length column since they doesn't provide insightful information for training
df.drop(['title', 'emp_length'], axis=1, inplace=True)
# Impute the missing values for mort_acc feature
total_acc_avg = df.groupby(by='total_acc').mean().mort_acc
def fill_mort_acc(total_acc, mort_acc):
    """Return mort_acc as-is, or, when it is missing (NaN), the rounded
    group mean from total_acc_avg for this row's total_acc value."""
    if not np.isnan(mort_acc):
        return mort_acc
    return total_acc_avg[total_acc].round()
# Row-wise imputation of mort_acc using the per-total_acc group means.
df['mort_acc'] = df.apply(lambda x: fill_mort_acc(x['total_acc'], x['mort_acc']), axis=1)
df.mort_acc.isnull().mean()
# Transform feature to numeric
df['term'] = df.term.replace(' 36 months', 36).replace(' 60 months', 60)
df.term.unique()
# Drop grade as it provides the same information as sub_grade
df.drop('grade', axis=1, inplace=True)
df.dropna(inplace=True)
# One-hot encoding for categorical variables
dummies = ['sub_grade', 'verification_status', 'purpose', 'initial_list_status',
           'application_type', 'home_ownership']
df = pd.get_dummies(df, columns=dummies, drop_first=True)
# Create zipcode feature from address (better indicator than the address itself)
df['zip_code'] = df.address.apply(lambda x: x[-5:])
# One-hot encoding
df = pd.get_dummies(df, columns=['zip_code'], drop_first=True)
# Drop address feature as we don't need it anymore
df.drop('address', axis=1, inplace=True)
# Drop issue_d column since it doesn't provide insightful information for training
df.drop('issue_d', axis=1, inplace=True)
# Extract year from earliest_cr_line (assumes "Mon-YYYY" format — the year is field 1)
df['earliest_cr_line'] = df.earliest_cr_line.str.split('-', expand=True)[1]
# Plotting the years when the credit files have been opened
fig, ax = plt.subplots(figsize=(12, 6))
sns.countplot(df['earliest_cr_line'], label='vertical', ax=ax)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, horizontalalignment='right')
plt.show()
# Drop duplicates
df.drop_duplicates(inplace=True)
df.shape
df.columns
# ## More Visualizations
# +
# Binning of the numerical values
# Cut loan_amnt into four labelled intervals; include_lowest keeps the
# left edge (0) inside the first bin.
df["loan_amnt_interval"] = pd.cut(
    x=df["loan_amnt"],
    bins=[0, 5000, 9300, 12000, 25000],
    include_lowest=True,
    labels=["LOW", "MEDIUM", "HIGH", "VERY HIGH"],
)
# Plotting the numerical values against the target variable
sns.barplot(x=df["loan_amnt_interval"], y=df["loan_status"])
# -
# Binning of the numerical values
# NOTE: these cells originally referenced an undefined `data` DataFrame
# (a NameError at runtime); the rest of this notebook works on `df`,
# so `df` is used consistently here as well.
df["annual_inc_interval"] = pd.cut(
    x=df["annual_inc"],
    bins=[0, 40000, 53000, 71000, 120000],
    include_lowest=True,
    labels=["LOW", "MEDIUM", "HIGH", "VERY HIGH"],
)
# Plotting the numerical values against the target variable
sns.barplot(x=df["annual_inc_interval"], y=df["loan_status"])
# Binning of the numerical values
df["installment_interval"] = pd.cut(
    x=df["installment"],
    bins=[0, 156, 250, 368, 905],
    include_lowest=True,
    labels=["LOW", "MEDIUM", "HIGH", "VERY HIGH"],
)
# Plotting the numerical values against the target variable
sns.barplot(x=df["installment_interval"], y=df["loan_status"])
# Binning of the numerical values
df["dti_interval"] = pd.cut(
    x=df["dti"],
    bins=[0, 9, 14, 19, 30],
    include_lowest=True,
    labels=["LOW", "MEDIUM", "HIGH", "VERY HIGH"],
)
# Plotting the numerical values against the target variable
sns.barplot(x=df["dti_interval"], y=df["loan_status"])
# Plotting the categorical values against the target variable
sns.barplot(x=df["pub_rec_bankruptcies"], y=df["loan_status"])
# Plotting the categorical values against the target variable
sns.barplot(x=df["term"], y=df["loan_status"])
# Plotting the categorical values against the target variable
sns.barplot(x=df["mort_acc"], y=df["loan_status"])
# ## Observations
# We have a dataset of shape (395219, 79). Some of the columns have already been one-hot encoded, thus, the total number of unique features is actually 21.
#
# Some interesting facts about our data:
# * Minimum loan amount is 500, while the maximum is 40k
# * Minimum interest rate is 5.3% , while the maximum 31% (looks like an outlier)
# * Some borrowers have 0 annual income
# * The oldest loan application opened was in 1944
# * 80% of the loans in our dataset have been fully paid while 19,6% have been charged off
# * All the numerical features plotted have a distribution skewed to the left, which might indicate the existence of some extreme values.
# * The most common term for loan repayment is 36 months.
# * Most of the borrowers have a mortgage account.
# * 1/7 of the borrowers have a public record of bankruptcy
# * 2001 and 2002 have been the years with the most loan application filed.
#
# When looking at some of the feature and their relationship with the defaulter_rate(charged off/total) we observe:
# * Borrowers with loan amounts above 25k are more likely to default. We see the same trend in the installments feature and dti.
# * Borrowers with a low annual income are almost 25% likely to default
# * Borrowers with a public record of bankruptcy are as likely to default as borrowers without a record
# * Borrowers who have a mortgage to pay are less likely to default
# * Borrowers with a loan repayment term of 60 months are twice as prone to default (probably because they have a higher amount to repay.)
#
#
#
| EdaAndDataProcessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project : Test a Perceptual Phenomenon
# ## Analyzing the Stroop Effect
# In this project we will investigate a classic phenomenon from experimental psychology known as the [Stroop Effect](https://en.wikipedia.org/wiki/Stroop_effect).
#
# We will begin by learning a little bit about the experiment, creating a hypothesis regarding the outcome of the task, then going through the task ourselves. We will look at data collected from others who have already done this task, following which we will conduct our own statistical analysis. Finally, we will interpret our results in terms of our hypotheses.
# ### Q1. What is the independent variable? What is the dependent Variable?
#
# - **Independent Variable:** The 'congruency' or 'incongruency' of words is the independent variable.
# - **Dependent Variable:** The time it takes to view and complete the test.
# ### Q2. What is an appropriate set of hypotheses for this task? Specify the null and alternative hypotheses based on what you think the researchers might be interested in. Justify your choices.
#
#
# Our Null and Alternate Hypotheses are:
#
# - **Null Hypothesis:** The time taken to complete the test is *NOT* influenced by congruency. Therefore:
#
# $$ H_0: \mu_{incongruent} \leq \mu_{congruent} $$
#
# - **Alternate Hypothesis:** The time taken for incongruent words is *GREATER* than the time taken for congruent words, thus:
#
# $$ H_1: \mu_{incongruent} \gt \mu_{congruent} $$
#
# _Here, $ \mu $ is the population mean._
#
# - **Statistical Test: Paired T-Test**
# - The paired sample t-test, sometimes called the dependent sample t-test, is a statistical procedure used to determine whether the mean difference between two sets of observations is zero.
# - A t-test is used because the population variance is unknown and the sample size is less than 30.
#
# ### Q3. Report some descriptive statistics regarding this dataset. Include at least one measure of central tendency and at least one measure of variability.
# Importing libraries
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import t
# %matplotlib inline
sns.set_style('darkgrid')
# Loading data into the DataFrame.
# Expected columns: 'Congruent' and 'Incongruent' completion times (seconds).
df = pd.read_csv('./stroopdata.csv')
# Displaying 5 rows of the DataFrame.
df.head(5)
# **Analyzing measures of central tendency and variability:**
#
# Calculating the necessary means, medians and standard deviations.
# Calculating means (rounded to 2 decimal places for reporting)
mean_c = round(df['Congruent'].mean(), 2)
mean_ic = round(df['Incongruent'].mean(), 2)
print("The mean of congruent words is: {}\n"
      "The mean of incongruent words is: {}".format(mean_c, mean_ic))
# Calculating medians
med_c = round(df['Congruent'].median(), 2)
med_ic = round(df['Incongruent'].median(), 2)
print("The median of congruent words is: {}\n"
      "The median of incongruent words is: {}".format(med_c, med_ic))
# Calculating standard deviations (pandas .std() is the sample std, ddof=1)
std_c = round(df['Congruent'].std(), 2)
std_ic = round(df['Incongruent'].std(), 2)
print("The standard deviation of congruent words is: {}\n"
      "The standard deviation of incongruent words is: {}".format(std_c, std_ic))
# ### Q4. Provide one or two visualizations that show the distribution of the sample data. Write one or two sentences noting what you observe about the plot or plots.
# Histogram of congruent-condition completion times
plt.hist(df['Congruent'], facecolor='g', alpha=0.75)
plt.xlabel("Time(seconds)")
plt.ylabel("Participants")
plt.title("Time taken - Congruent Words");
# **Observations:** In this histogram, we can observe that 6 participants, being the largest number, took between 14 and 16 seconds to complete the test for congruent words. The sample above can be said to have a normal distribution.
# Histogram of incongruent-condition completion times
plt.hist(df['Incongruent'], facecolor='g', alpha=0.75)
plt.xlabel("Time(seconds)")
plt.ylabel("Participants")
plt.title("Time taken - Incongruent Words");
# **Observations:** In this histogram, we can observe that 6 participants, the largest number, took approximately 20 seconds to complete the test for incongruent words. What is interesting to note is that 2 participants, as the outliers in this sample, took more than 33 seconds to complete the same test. Thus, this plot is right-skewed.
# ### Q5. Perform the statistical test and report your results. What is your confidence level or Type I error associated with your test? What is your conclusion regarding the hypotheses you set up? Did the results match up with your expectations?
# As mentioned in Q2, the dependent and paired T-Test will be used as our statistical test.
#
# - **Confidence Level:** For this test, we will establish a confidence level of 99%.
# - **Degrees of Freedom:** Since the sample size is 24, then the degree of freedom will be (n-1) i.e. 23.
# - **Point Estimate:** The point estimate is the difference of means for congruent and incongruent words i.e. 22.02 - 14.05 = 7.97
# Calculate the T-Critical value for a 99% confidence level
# (one-tailed, df = 23 for the 24 paired observations)
print("T-Critical Value:", t.ppf(.99, 23))
# In order to determine the standard deviation of differences, we will first compute the differences in values in the new column "Difference".
df['Difference'] = df['Congruent'] - df['Incongruent']
# Verifying changes
df.head(5)
# We will now calculate the standard deviation of differences of values in the dataset:
# Standard deviation of differences
print("The standard deviation of differences: ", round(df['Difference'].std(axis=0), 2))
# **Calculating T-Statistic:** Now that we have the necessary values, our T-Statistic is:
# NOTE(review): 7.97 (point estimate) and 4.86 (std of differences) are rounded
# values transcribed by hand from above; recomputing them from df here would
# avoid transcription drift — confirm against the data.
print("T-Statistic: ", (7.97 / (4.86 / math.sqrt(24))))
# **Results:** It can be observed that our T-Critical value is **2.4998**, which is certainly not equal to our T-Statistic **8.033**.
#
# Thus, according to the findings above, we will **reject the null hypothesis**. The T-Test confirms the fact that incongruent tests take longer than congruent tests.
# ### Q6. What do you think is responsible for the effects observed? Can you think of an alternative or similar task that would result in a similar effect?
# Upon further investigation into the Stroop effect, in a paper from the University of Waterloo, Canada - <NAME> refers to three major explanations which have emerged since <NAME> first published his seminal work in 1935:
#
# 1. "The first of these was Cohen, Dunbar, and McClelland’s parallel distributed processing, or connectionist, model, proposed in 1991. At its core, their theory is a strength theory, designed as it was to capture the training data reported by MacLeod and Dunbar. Processing pathways gain strength with practice, and relative strength determines likelihood and degree of interference. Thus, given our extensive experience with reading, color-word pathways ordinarily are much more strongly connected to color name responses than are color pathways."
#
# 2. The second came in 2003 where "Melara and Algom coming from a fundamental perception perspective, proposed that two factors underlie Stroop interference: dimensional imbalance and dimensional uncertainty. Dimensional imbalance reflects how correlated the two dimensions of a stimulus are and how surprising a stimulus is and determines the ease of recovery of a stimulus representation from memory. Dimensional uncertainty reflects how salient a stimulus is, notably how likely or unlikely it is in the context of other(recently presented) stimuli. Together, these two factors determine the success of attentional selection by focusing on salient, surprising, and/or correlated information contained within each dimension and across the two dimensions of a Stroop stimulus. Each influences excitation of targets and inhibition of distractors.Stroop interference occurs both because there is more uncertainty in the colors than in the words and because the words are more salient than the colors."
#
# 3. The third, also in 2003, "Roelofs proposed his model of Stroop interference, a model situated in an already implemented model of word production (WEAVER ++) from the psycholinguistic literature. This also can be viewed as a two-factor model, with processing interactions occurring in the system that carries out language production, modulated by a supervisory attentional system that maintains task control. Roelofs posited that different architectures underlie color naming and word reading, with color naming, because it is conceptually driven, requiring an extra step due to colors not being directly connected to their names, unlike words."
#
# It is interesting to note that the Stroop-like effects have been observed in alternate tasks which shine a new light on the cognitive functions of humans.
#
# In 1998, Baldo, Shimamura and Prinzmetal, from the University of California in Berkely, studied the response-compatibility effects in an arrow-word "Stroop like paradigm" i.e. subjects were asked to respond to either an arrow or a word, while ignoring the other. They observed that "response compatibility played a significant role in generating Stroop-like interference. Robust interference effects were observed when the subjects responded manually to word stimuli (ignoring irrelevant arrows) and when they responded vocally to arrow stimuli (ignoring irrelevant words). Smaller interference effects were observed under response-compatible conditions, namely, responding manually to arrows and vocally to words. In the second experiment, within-dimension displays (e.g., arrow-arrow or word-word displays) yielded a pattern of interference that did not interact with response modality. These findings indicate that both stimulus-response compatibility effects and target-distractor similarity are crucial for understanding Stroop-like interference."
# ## Sources:
#
# [Paired Sample T-Test - Statistics Solutions](http://www.statisticssolutions.com/manova-analysis-paired-sample-t-test/)
#
# [Stroop Effect - Wikipedia](https://en.wikipedia.org/wiki/Stroop_effect)
#
# [T-Score Vs. Z-Score - Statistics How To](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/hypothesis-testing/t-score-vs-z-score/)
#
# [Standardized Test Statistic - Statistics How To](https://www.statisticshowto.datasciencecentral.com/standardized-test-statistic/)
#
# [The Stroop Effect - <NAME> - University of Waterloo](http://imbs.uci.edu/~kjameson/ECST/MacLeod_TheStroopEffect.pdf)
#
# [Mapping Symbols to Response Modalities: Interference Effects on Stroop-like Tasks](https://link.springer.com/content/pdf/10.3758/BF03206864.pdf)
| Test-a-perceptual-phenomenon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.2 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# +
#21 直方图归一化
#灰度变换
import cv2
import numpy as np
import matplotlib.pyplot as plt
def hist_normalization(img, a=0, b=255):
    """Linearly stretch the image's intensity range [min, max] onto [a, b].

    Floating-point rounding can push a few values slightly outside [a, b],
    so the result is clamped before the uint8 cast.
    """
    lo, hi = img.min(), img.max()
    out = (b - a) / (hi - lo) * (img.copy() - lo) + a
    out[out < a] = a
    out[out > b] = b
    return out.astype(np.uint8)
# Read image as float for the arithmetic in hist_normalization.
# (np.float was removed in NumPy 1.24 — use the builtin float instead.)
img = cv2.imread("imori_dark.jpg").astype(float)
H, W, C = img.shape
# histogram normalization
out = hist_normalization(img)
# Display histogram of the stretched image
plt.hist(out.ravel(), bins=255, rwidth=0.8, range=(0, 255))
plt.savefig("myanswers/out_21_2.png")
plt.show()
cv2.imwrite("myanswers/out_21_1.jpg",out)
# +
#22 直方图操作
import cv2
import numpy as np
import matplotlib.pyplot as plt
# 对图像浮点读取之后进行运算,注意处理最大值和最小值出界问题
def hist_mani(img, m0=128, s0=52):
    """Shift and scale the histogram so the output has mean m0 and std s0.

    Values are clamped to [0, 255] before the uint8 cast.
    """
    m, s = np.mean(img), np.std(img)
    out = np.clip(s0 / s * (img - m) + m0, 0, 255)
    return out.astype(np.uint8)
# Read as float so the mean/std arithmetic is exact.
# (np.float was removed in NumPy 1.24 — use the builtin float instead.)
img = cv2.imread("imori_dark.jpg").astype(float)
out = hist_mani(img)
# Display histogram
plt.hist(out.ravel(), bins=255, rwidth=0.8, range=(0, 255))
plt.savefig("myanswers/out_22_2.png")
plt.show()
# Save result
cv2.imwrite("myanswers/out_22_1.jpg", out)
# +
#23 直方图均衡化
#使直方图变得平坦
#累计分布函数:对于离散变量而言,所有小于等于a的值出现概率的和
import cv2
import numpy as np
import matplotlib.pyplot as plt
def hist_equalization(img, z_max=255):
    """Histogram equalization: remap each grey level i (1..z_max-1) to
    z_max * CDF(i), flattening the intensity distribution.

    Levels outside that loop range (0 and >= z_max) keep their original
    values — presumably acceptable at the histogram extremes; confirm.
    """
    H, W, C = img.shape
    n_pix = H * W * C * 1.
    out = img.copy()
    cum = 0.
    for level in range(1, z_max):
        mask = np.where(img == level)
        cum += len(img[mask])
        out[mask] = z_max / n_pix * cum
    return out.astype(np.uint8)
# Read as float; hist_equalization writes fractional CDF values before the cast.
# (np.float was removed in NumPy 1.24 — use the builtin float instead.)
img = cv2.imread("imori_dark.jpg").astype(float)
# histogram equalization
out = hist_equalization(img)
# Display histogram
plt.hist(out.ravel(), bins=255, rwidth=0.8, range=(0, 255))
plt.savefig("myanswers/out_23_2.png")
plt.show()
cv2.imwrite("myanswers/out_23_1.jpg",out)
# +
# 24 伽马矫正
# 预先增大RGB的值来排除显示器的影响,修正图像
import cv2
import numpy as np
import matplotlib.pyplot as plt
def gamma_correction(img, c=1, g=2.2):
    """Gamma correction: out = 255 * (1/c) * (img/255)^(1/g), cast to uint8.

    Pre-brightens the image to cancel a display's gamma response.
    """
    normalized = img.copy() / 255.
    corrected = (1 / c) * (normalized ** (1 / g))
    return (corrected * 255).astype(np.uint8)
# Read image as float (np.float was removed in NumPy 1.24).
img = cv2.imread("imori_gamma.jpg").astype(float)
# Gamma correction
out = gamma_correction(img, c=1, g=2.2)
cv2.imwrite("myanswers/out_24.jpg", out)
# +
# 25 最近邻插值
# 放大图像,补充像素取最临近
import cv2
import numpy as np
import matplotlib.pyplot as plt
def nn_interpolate(img, ax=1, ay=1):
    """Nearest-neighbour upscaling of img (H, W, C) by ay (vertical) and ax (horizontal).

    Fixes over the original:
    - np.int (removed in NumPy 1.24) replaced with the builtin int;
    - the y index grid is reshaped to (aH, aW) — the original (aW, aH) only
      worked for square scaling and broke broadcasting otherwise;
    - mapped indices are clipped to the image bounds, since np.round's
      half-to-even rounding can produce H (resp. W) for e.g. ax == 2.
    """
    H, W, _ = img.shape
    aH = int(ay * H)
    aW = int(ax * W)
    # y[i, j] = i and x[i, j] = j over the (aH, aW) output grid
    y = np.arange(aH).repeat(aW).reshape(aH, -1)
    x = np.tile(np.arange(aW), (aH, 1))
    # map each output coordinate back to its nearest source pixel
    y = np.minimum(np.round(y / ay).astype(int), H - 1)
    x = np.minimum(np.round(x / ax).astype(int), W - 1)
    out = img[y, x]
    out = out.astype(np.uint8)
    return out
# Read image as float (np.float was removed in NumPy 1.24).
img = cv2.imread("imori.jpg").astype(float)
# Nearest Neighbor
out = nn_interpolate(img, ax=1.5, ay=1.5)
# Save result
cv2.imwrite("myanswers/out_25.jpg", out)
# +
# 26 双线性插值
# +
# 27 双三次插值
# +
# 28 仿射变换
| Question_21_30/myanswers_21_30.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="LdNU892PIozd"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# + [markdown] id="gXSlrsDd876H"
# # **Load Data**
# + id="XXYefSPSOidg"
data = pd.read_csv('/content/drive/MyDrive/PIAIC/credit_card/creditcard.csv')
# + id="sRLd_IhsiIZc"
# from google.colab import drive
# drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/", "height": 308} id="5O1cZFf1OmFo" outputId="62fb76d9-819a-422d-c139-66b1ce9ae26c"
data.describe()
# + [markdown] id="WedbRy3l9Nb6"
# # **Check Missing Values ( If Exist ; Fill each record with mean of its feature )**
# + colab={"base_uri": "https://localhost:8080/"} id="SZ3K03HAP20X" outputId="b5fa9e38-5940-4f79-8c80-dca921fbae81"
# %time data.isnull().any()
# + [markdown] id="z3Xe-5Fa_6up"
# # **Standardized the Input Variables**
# + id="HeHjcHCX9Xbx"
data.reset_index(drop=True, inplace=True)
labels = data.pop('Class')
# + id="0ALZLF1tAYjA"
data -= data.mean()
data /= data.std()
# + colab={"base_uri": "https://localhost:8080/", "height": 378} id="Ry-8pJmyDbeP" outputId="88b63f1a-acb0-4f1c-c0a7-473a6632cda8"
data.describe()
# + [markdown] id="Ai7j-PDlGnLC"
# # **Split into 50% Training(Samples,Labels) , 30% Test(Samples,Labels) and 20% Validation Data(Samples,Labels).**
# + id="eSZjq_rmDfIy"
data_len = len(data)
train_data = data.iloc[:data_len*50//100]
val_data = data.iloc[data_len*50//100:data_len*70//100]
test_data = data.iloc[data_len*70//100:]
labels_len = len(labels)
train_labels = labels.iloc[:labels_len*50//100]
val_labels = labels.iloc[labels_len*50//100:labels_len*70//100]
test_labels = labels.iloc[labels_len*70//100:]
# + id="MQcVrYYRFu03"
# from google.colab import drive
# drive.mount('/content/drive')
# + [markdown] id="fHbTZIW2IEMk"
# # **Model : input Layer (No. of features ), 3 hidden layers including 10,8,6 unit & Output Layer with activation function relu/tanh (check by experiment).**
# + id="l6Abpitwh7YH"
from tensorflow.keras import models
from tensorflow.keras import layers
import tensorflow as tf
# Build the tanh MLP: input -> 10 -> 8 -> 6 -> 1 (sigmoid) for binary
# classification. input_shape is only meaningful on the first layer; Keras
# ignores it on later layers, so the redundant (and wrongly-sized)
# input_shape arguments on the hidden layers were removed.
network = models.Sequential()
network.add(layers.Dense(10,activation="tanh",input_shape=(len(train_data.columns),) ))
network.add(layers.Dense(8,activation="tanh"))
network.add(layers.Dense(6,activation="tanh"))
network.add(layers.Dense(1,activation="sigmoid"))
network.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
# + colab={"base_uri": "https://localhost:8080/"} id="sFaARslvipqH" outputId="328ba64b-7d92-425c-8721-25e70b5cbba5"
network.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="G1Uol4PIFJYR" outputId="3b173d8c-f464-4c37-8739-fb7eaf4d92a9"
# NOTE(review): in notebook form the next line is the `%time` magic and is the
# body of the `with` block; as a plain .py script the `with` has no body and
# is a SyntaxError — a jupytext artifact to keep in mind when running as a script.
with tf.device('/device:GPU:1'):
    # %time MODEL = network.fit(train_data,train_labels,epochs=10,batch_size=256,validation_data=(val_data,val_labels))
# + colab={"base_uri": "https://localhost:8080/"} id="UG8oxKVxKMi8" outputId="254991d4-b073-4ae0-a377-00547319298f"
test_loss , test_acc = network.evaluate(test_data,test_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="TmosmJihKSlT" outputId="e8daec46-2aff-495f-9932-a012bbc3b07d"
# Inspect the fraud-labelled rows of the test split
test_labels[test_labels == 1]
# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="GKpgJUcsZym6" outputId="c8b5929c-384e-4791-cb0d-1bea6fcb0ffc"
# Training vs validation accuracy per epoch for the tanh model
history_dict = MODEL.history
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
epoches = np.arange(1,len(history_dict['acc'])+1)
plt.plot(epoches,acc_values,'bo',label="Training Accuracy")
plt.plot(epoches,val_acc_values,'b',label="Validation Accuracy")
plt.title('Training and validation Accuracy')
plt.xlabel("Epoches")
plt.ylabel("Accuracy")
plt.legend()
# plt.show()
# + [markdown] id="-k-7aD606f29"
# # **Doing With RELU**
# + colab={"base_uri": "https://localhost:8080/"} id="oGuUVIzAa4sx" outputId="8feff394-932d-45d9-b69b-fa0e1efa9c72"
from tensorflow.keras import models
from tensorflow.keras import layers
import tensorflow as tf
# Wider ReLU variant of the MLP: input -> 30 -> 20 -> 10 -> 1 (sigmoid)
network1 = models.Sequential()
network1.add(layers.Dense(30,activation="relu",input_shape=(len(train_data.columns),) ))
network1.add(layers.Dense(20,activation="relu" ))
network1.add(layers.Dense(10,activation="relu"))
network1.add(layers.Dense(1,activation="sigmoid"))
network1.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
# NOTE(review): as in the previous cell, the `%time` magic is the sole `with`
# body — valid in the notebook, a SyntaxError as a plain .py script.
with tf.device('/device:GPU:1'):
    # %time MODEL1 = network1.fit(train_data,train_labels,epochs=10,batch_size=512,validation_data=(val_data,val_labels))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="nmZW8vyPb62f" outputId="921709c9-e60c-4ece-a550-9ba3514afe63"
# Training vs validation accuracy per epoch for the ReLU model
history_dict = MODEL1.history
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
epoches = np.arange(1,len(history_dict['acc'])+1)
plt.plot(epoches,acc_values,'bo',label="Training Accuracy")
plt.plot(epoches,val_acc_values,'b',label="Validation Accuracy")
plt.title('Training and validation Accuracy')
plt.xlabel("Epoches")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
# + [markdown] id="KW_po7htD_Tt"
# # **Evaluation Step**
# + colab={"base_uri": "https://localhost:8080/"} id="NNixGItObakm" outputId="61d6b0e8-80ff-4df2-fd85-03afe11cd01e"
test_loss , test_acc = network1.evaluate(test_data,test_labels)
# + id="5JwYtiY8JBRB"
# + [markdown] id="7bXsxUy_EHEW"
# # **Predict**
# + colab={"base_uri": "https://localhost:8080/"} id="bPYCV-tSELoC" outputId="53320638-1470-4a36-ae78-9266e2eb90e5"
prediction_test_data = test_data
predictions = network1.predict(prediction_test_data)
# NOTE(review): np.argmax(predictions) is a single flat index, so `hit`
# broadcasts one scalar comparison across all labels — this does not count
# correct predictions. The intended comparison is likely the thresholded
# per-row predictions (see y_pred below) against test_labels.
hit = np.argmax(predictions) == np.array(test_labels)
print(f"{len(hit)} out of {len(predictions)} ")
# + id="4BQo5AeCJH7p"
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 class predictions
y_pred = (predictions > 0.5).astype('u8')
# + colab={"base_uri": "https://localhost:8080/"} id="mbrZUqY5Gfrp" outputId="13ebbcb0-5fc9-4310-8aa5-851536113c3b"
print(f"Accuracy is upto {test_acc*100}%")
# + colab={"base_uri": "https://localhost:8080/"} id="w41dhQp7HTLv" outputId="5e39f49d-d0e2-4ffc-b852-f9a94a2717bb"
tf.math.confusion_matrix(
    test_labels, y_pred, num_classes=None, weights=None, dtype=tf.dtypes.int32,
    name=None
)
# + colab={"base_uri": "https://localhost:8080/"} id="eRyLmAYZIXwe" outputId="d4e9af5d-fd73-4dd6-a082-dd243b74cedc"
# NOTE(review): `p` is only assigned in the NEXT cell, so running top-to-bottom
# raises NameError here; and since np.argmax over the whole array is a scalar,
# p.shape is () anyway.
p.shape
# + id="TmUK8A7phsnG"
p=np.argmax(predictions)
| PIAIC Assignments/Deep Learning Assignments Set/Credit Card Fraud Detection assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from miscpy.utils.sympyhelpers import *
init_printing()
# 
# Symbolic orbital-element symbols: positive scalars, then the real-valued
# rates, angles and perturbing-force components.
a,n,mu,r,e,h = symbols("a,n,mu,r,e,h",positive=True)
ed,I,Id,O,Od,w,wd,th,thd,hd,t,f_r,f_th,f_h,v_r,v_th,nu = \
symbols("edot,I,Idot,Omega,Omegadot,omega,omegadot,\
theta,thetadot,hdot,t,f_r,f_theta,f_h,v_r,v_theta,nu",real=True)
# Define required rotation matrices ($\Omega$ rotation about $\mathbf{\hat{e}}_3$, $I$ about $\mathbf{\hat{n}}$ and $\theta$ rotation about $\mathbf{\hat{h}}$)
rot1 = Matrix(([cos(O),sin(O),0],[-sin(O),cos(O),0],[0,0,1]))
rot2 = Matrix(([1,0,0],[0,cos(I),sin(I)],[0,-sin(I),cos(I)]))
rot3 = Matrix(([cos(th),sin(th),0],[-sin(th),cos(th),0],[0,0,1]))
rot1,rot2,rot3
# $^\mathcal{I}\boldsymbol{\omega}^\mathcal{B} = \dot\Omega \mathbf{\hat{e}}_3 + \dot I \mathbf{\hat{n}} + \dot\theta \mathbf{h}$
# Angular velocity of the orbit frame: compose the three Euler-angle rates.
IwB = rot3*rot2*Matrix([0,0,Od])+ rot3*Matrix([Id,0,0]) + Matrix([0,0,thd]); IwB
# $\frac{^\mathcal{I}\textrm{d}\mathbf{h}}{\textrm{d}t} = \frac{^\mathcal{B}\textrm{d}\mathbf{h}}{\textrm{d}t} + {}^\mathcal{I}\boldsymbol{\omega}^\mathcal{B} \times \mathbf{h} = \mathbf{r}\times \mathbf{f}$
hvec = Matrix([0,0,h])
# Transport theorem: inertial derivative of h expressed in the rotating frame.
dhdt1 = difftotalmat(hvec,t,{h:hd}) + IwB.cross(hvec)
rvec = Matrix([r,0,0])
fvec = Matrix([f_r,f_th,f_h])
dhdt2 = rvec.cross(fvec)
# Equate the two torque expressions and solve for hdot, Omegadot, Idot.
sol1 = solve(dhdt1-dhdt2,[hd,Od,Id]); sol1
#
# $\frac{^\mathcal{I}\textrm{d}\mathbf{e}}{\textrm{d}t} = \frac{1}{\mu}\left(\mathbf{f} \times \mathbf{h} + \mathbf{v} \times \mathbf{r} \times \mathbf{f}\right)$
evec = Matrix([e*cos(th - w),-e*sin(th - w),0])
dedt1 = simplify(difftotalmat(evec,t,{e:ed,th:thd,w:wd})+ IwB.cross(evec))
vvec = Matrix([v_r,v_th,0])
dedt2 = (fvec.cross(hvec) + vvec.cross(rvec.cross(fvec)))/mu
# Substitute the Omegadot/Idot solutions and w = theta - nu, then solve the
# eccentricity-vector rate equation for edot and omegadot.
dedt3 = simplify(dedt1.subs([(Od,f_h*r*sin(th)/(h*sin(I))),(Id,f_h*r*cos(th)/h),(w,th-nu)]))
sol2 = simplify(solve(dedt3 - dedt2.subs(r*v_th,h).subs(v_r,mu/h*e*sin(nu)),[ed,wd])); sol2
collect(simplify(sol2[ed]),f_th)
# $^\mathcal{I}\boldsymbol{\omega}^\mathcal{B} \cdot \mathbf{\hat{h}} = \frac{v_\theta}{r}$
IwB.dot(Matrix([0,0,1]))
# $h = \sqrt{\mu a (1 - e^2)}$
# Semi-major-axis rate obtained by differentiating a = h^2 / (mu (1 - e^2)).
ad = difftotal(h**2/mu/(1 - e**2),t,{h:hd,e:ed}); ad
ad = ad.subs(hd,sol1[hd]).subs(ed,sol2[ed]).subs(v_r,mu/h*e*sin(nu));ad
simplify(ad.subs(f_th,0).subs(h,sqrt(mu*a*(1-e**2))))
simplify(ad.subs(f_r,0).subs(h,sqrt(mu*a*(1-e**2))).subs(r,a*(1-e**2)/(1+e*cos(nu))))
#
| Notebooks/Gauss Equations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IMDB Dataset Analysis
# +
# Supress Warnings
import warnings
warnings.filterwarnings('ignore')
# +
# Import the numpy and pandas packages
import numpy as np
import pandas as pd
# -
# ## Task 1: Reading and Inspection
#
# - ### Subtask 1.1: Import and read
#
# Import and read the movie database. Store it in a variable called `movies`.
# Write your code for importing the csv file here
movies = pd.read_csv('Movie+Assignment+Data.csv')
movies.head()
# Last 5 rows
movies.tail()
# - ### Subtask 1.2: Inspect the dataframe
#
#     Inspect the dataframe's columns, shapes, variable types etc.
# Write your code for inspection here
movies.info()
movies.describe().T
# #### Some key points from above table
#
# - Avg movie duration is 107.2 minutes
# - Avg imdb is 6.44
# - Avg number of users reviews is 272
# Write your code for inspection here
# Keep the original (rows, columns) shape around for the retention check later.
a = movies.shape
a
movies.columns
movies.index
# ## Task 2: Cleaning the Data
#
# - ### Subtask 2.1: Inspect Null values
#
# Find out the number of Null values in all the columns and rows. Also, find the percentage of Null values in each column. Round off the percentages upto two decimal places.
# Write your code for column-wise null count here
movies.isnull().sum()
# Write your code for row-wise null count here
movies.isnull().sum(axis = 1)
# +
# Write your code for column-wise null percentages here
# (mean of the boolean mask is the fraction of nulls; *100 gives percent)
(movies.isnull().mean()*100).round(2)
# -
# - ### Subtask 2.2: Drop unnecessary columns
#
# For this assignment, you will mostly be analyzing the movies with respect to the ratings, gross collection, popularity of movies, etc. So many of the columns in this dataframe are not required. So it is advised to drop the following columns.
# - color
# - director_facebook_likes
# - actor_1_facebook_likes
# - actor_2_facebook_likes
# - actor_3_facebook_likes
# - actor_2_name
# - cast_total_facebook_likes
# - actor_3_name
# - duration
# - facenumber_in_poster
# - content_rating
# - country
# - movie_imdb_link
# - aspect_ratio
# - plot_keywords
# +
# Write your code for dropping the columns here. It is advised to keep inspecting the dataframe
#after each set of operations
to_drop = ['color', 'director_facebook_likes','actor_1_facebook_likes','actor_2_facebook_likes',
           'actor_3_facebook_likes', 'actor_2_name','cast_total_facebook_likes','actor_3_name',
           'duration','facenumber_in_poster','content_rating','country','movie_imdb_link','aspect_ratio',
           'plot_keywords']
# Taking all column names to list
my_col = movies.columns.to_list()
# Columns to be retained: a plain comprehension instead of the original
# list comprehension abused for its append() side effect.
new_col = [i for i in my_col if i not in to_drop]
print(new_col) # These columns are to be retained
movies = movies[['movie_title','actor_1_name','director_name', 'language','genres','title_year','budget', 'gross', 'imdb_score','num_critic_for_reviews', 'num_voted_users', 'num_user_for_reviews','movie_facebook_likes']]
movies.head() # Reshuffling DataFrame
# -
#Checking Shape after removing columns
movies.shape
movies.describe().T
# - ### Subtask 2.3: Drop unnecessary rows using columns with high Null percentages
#
#     Now, on inspection you might notice that some columns have large percentage (greater than 5%) of Null values. Drop all the rows which have Null values for such columns.
# +
# Columnwise null values
movies.isnull().mean(axis = 0)*100
# -
# #### We can see 'gross' and 'budget' columns have more than 5% of null values
# Write your code for dropping the rows here
# Drop only rows where gross or budget is missing; other nulls are kept.
movies.dropna(axis = 0, subset = ['gross', 'budget'], inplace = True)
movies.head()
#Checking Shape
movies.shape
#Checking % of Null Values in columns
movies.isnull().mean(axis = 0)*100
# - ### Subtask 2.4: Fill NaN values
#
# You might notice that the `language` column has some NaN values. Here, on inspection, you will see that it is safe to replace all the missing values with `'English'`.
movies['language'].isnull().sum()
# Inspect the rows with a missing language. The boolean mask is used
# directly; the redundant `== True` comparison was removed.
movies[movies['language'].isnull()]
# Write your code for filling the NaN values in the 'language' column here
movies['language'].fillna(value = 'English', inplace = True)
#Checking the previously-null rows now show 'English'
movies.loc[[3086, 4110, 4958], :]
#Checking % of Null Values in columns
movies.isnull().mean(axis = 0)*100
# - ### Subtask 2.5: Check the number of retained rows
#
# You might notice that two of the columns viz. `num_critic_for_reviews` and `actor_1_name` have small percentages of NaN values left. You can let these columns as it is for now. Check the number and percentage of the rows retained after completing all the tasks above.
movies.shape
# ### Total rows retained = 3891
b = movies.shape
b
# No. of Rows in initial dataset
a[0]
# No of Rows after Cleaning Dataset
b[0]
# Write your code for checking number of retained rows here
retained_rows_percent = b[0]*100/a[0]
retained_rows_percent
# ### Rows retained = 77.16%
# **Checkpoint 1:** You might have noticed that we still have around `77%` of the rows!
# ## Task 3: Data Analysis
#
# - ### Subtask 3.1: Change the unit of columns
#
# Convert the unit of the `budget` and `gross` columns from `$` to `million $`.
movies.head()
# Convert the 'budget' and 'gross' columns from $ to million $.
# Element-wise division; same result as applying a lambda column-wise.
movies[['budget', 'gross']] = movies[['budget', 'gross']] / 1000000
movies.head()
movies.describe().T
# - ### Subtask 3.2: Find the movies with highest profit
#
# 1. Create a new column called `profit` which contains the difference of the two columns: `gross` and `budget`.
# 2. Sort the dataframe using the `profit` column as reference.
# 3. Plot `profit` (y-axis) vs `budget` (x- axis) and observe the outliers using the appropriate chart type.
# 4. Extract the top ten profiting movies in descending order and store them in a new dataframe - `top10`
# Write your code for creating the profit column here
# profit = gross - budget (both already converted to million $ above).
movies['profit'] = movies['gross'] - movies['budget']
movies.head()
movies.shape
# Write your code for sorting the dataframe here
# Highest-profit movies first; the inplace sort mutates `movies` for all later cells.
movies.sort_values(by = 'profit', ascending = False, inplace = True)
movies.head(10)
movies.tail()
# +
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set_style('whitegrid')
# -
# Write code for profit vs budget plot here
plt.figure(figsize = (10,8))
plt.scatter(x= movies['budget'], y = movies['profit'], c = 'r', )
plt.xlabel("Budget")
plt.ylabel("Profit")
plt.title("Budget Vs Profit")
plt.show()
# +
#Plot after removing outliers
plt.figure(figsize = (10,8))
sns.jointplot(x= 'budget', y = 'profit', data = movies.iloc[:-14, :], kind = 'scatter')
plt.show()
# +
# Write your code to get the top 10 profiting movies here
top10 = movies.iloc[:10, :]
top10
# -
# #### Here we can Observe The Avengers movie is repeated so it means we have duplicates
# - ### Subtask 3.3: Drop duplicate values
#
# After you found out the top 10 profiting movies, you might have noticed a duplicate value. So, it seems like the dataframe has duplicate values as well. Drop the duplicate values from the dataframe and repeat `Subtask 3.2`. Note that the same `movie_title` can be there in different languages.
# +
#Checking - searching Duplicate Rows based on movie_title and language
pd.set_option('display.max_rows', 200)
dupli_bool = movies.duplicated(subset = 'movie_title', keep = False)
duplicate = movies.loc[dupli_bool == True]
print(duplicate[['movie_title', 'language']])
# -
# ### We can See the movie 'The Host' is in Korean and English
movies.shape
# Write your code for dropping duplicate values here
# subset=None -> rows must match on ALL columns to count as duplicates, so the
# same movie_title in a different language (e.g. 'The Host') is kept, as required.
movies.drop_duplicates(subset = None, inplace = True)
movies.head(10)
movies.shape
# +
# Checking - 35 rows are removed
pd.set_option('display.max_rows', 200)
dupli_bool2 = movies.duplicated(subset = 'movie_title', keep = False)
duplicate2 = movies.loc[dupli_bool2 == True]
duplicate2
# -
#
#
# ### We can still see similar rows but they are different in mostly one column "num_voted_users "
#
#
#
# Checking 'The Host' is not removed from the dataframe
movies.loc[[1002, 2988], :]
# +
# Repeat subtask 3.2
# Subtask 3.2: Find the movies with highest profit
# Create a new column called profit which contains the difference of the two columns: gross and budget.
# Sort the dataframe using the profit column as reference.
# Plot profit (y-axis) vs budget (x- axis) and observe the outliers using the appropriate chart type.
# Extract the top ten profiting movies in descending order and store them in a new dataframe - top10
# -
# ### movies dataframe already having profit column and sorted
# * Create a new column called profit which contains the difference of the two columns: gross and budget.
# * Sort the dataframe using the profit column as reference.
movies.head()
movies.shape
# Write code for repeating subtask 2 here
# Write code for profit vs budget plot here
# Write code for profit vs budget plot here
plt.figure(figsize = (10,8))
plt.scatter(x= movies['budget'], y = movies['profit'], c = 'g', )
plt.xlabel("Budget")
plt.ylabel("Profit")
plt.title("Budget Vs Profit")
plt.show()
# +
# Write code for repeating subtask 2 here
# Write your code to get the top 10 profiting movies here
top10 = movies.iloc[:10, :]
top10
# -
# ### We can see the duplicate row removed ,
# #### Two movies directed by <NAME> in the list are Avatar and Titanic
# **Checkpoint 2:** You might spot two movies directed by `<NAME>` in the list.
# - ### Subtask 3.4: Find IMDb Top 250
#
# 1. Create a new dataframe `IMDb_Top_250` and store the top 250 movies with the highest IMDb Rating (corresponding to the column: `imdb_score`). Also make sure that for all of these movies, the `num_voted_users` is greater than 25,000.
# Also add a `Rank` column containing the values 1 to 250 indicating the ranks of the corresponding films.
# 2. Extract all the movies in the `IMDb_Top_250` dataframe which are not in the English language and store them in a new dataframe named `Top_Foreign_Lang_Film`.
# +
# Creating a copy of movies dataframe - This dataframe is cleaned of NaN and Duplicate rows
IMDB_movies = movies.copy()
IMDB_movies.head()
# -
IMDB_movies.shape
IMDB_movies.columns
# +
# Since there are several similar rows with different 'num_voted_users' we take mean of that column
IMDB_movies = IMDB_movies.groupby(['movie_title', 'actor_1_name', 'director_name', 'language', 'genres',
'title_year', 'budget', 'gross', 'imdb_score', 'num_critic_for_reviews',
'num_user_for_reviews', 'movie_facebook_likes',
'profit'])['num_voted_users'].mean()
IMDB_movies = pd.DataFrame(IMDB_movies)
IMDB_movies.reset_index(inplace = True)
# -
IMDB_movies.shape
IMDB_movies.head()
# +
# Checking duplicate rows having same movie_title
pd.set_option('display.max_rows', 200)
dupli_bool3 = IMDB_movies.duplicated(subset = 'movie_title', keep = False)
duplicate3 = IMDB_movies.loc[dupli_bool3 == True]
duplicate3
# +
# Write your code for extracting the top 250 movies as per the IMDb score here.
#Make sure that you store it in a new dataframe and name that dataframe as 'IMDb_Top_250'
# First sorting by IMDb score
IMDB_movies.sort_values(by = 'imdb_score', ascending = False, inplace = True) # sort by imdb score
IMDB_movies.head()
# +
#Filtering based on 'num_voted_users' > 25000
IMDB_movies = IMDB_movies[IMDB_movies['num_voted_users'] > 25000]
IMDB_movies.head()
# -
IMDB_movies.shape
# ### So now only 2544 rows are remaining
# +
# Top 250 movies based on imdb score and Filtering based on 'num_voted_users' > 25000
IMDb_Top_250 = IMDB_movies.iloc[:250, :]
IMDb_Top_250.head()
# -
IMDb_Top_250.shape
# +
# Making a new rank column based on IMDb score
# Work on an explicit copy first: IMDb_Top_250 was created as a slice of
# IMDB_movies, and assigning a new column to a slice triggers pandas'
# SettingWithCopyWarning (and may not propagate as intended).
IMDb_Top_250 = IMDb_Top_250.copy()
# method='first' assigns ranks 1..250 in current (already sorted) order on ties.
IMDb_Top_250['rank'] = IMDb_Top_250['imdb_score'].rank(ascending = False, method = 'first')
IMDb_Top_250
# -
# ## <NAME> Rank is 238
# +
#Extract all the movies in the IMDb_Top_250 dataframe which are not in the English language and
#store them in a new dataframe named Top_Foreign_Lang_Film.
# Write your code to extract top foreign language films from 'IMDb_Top_250' here
Top_Foreign_Lang_Film = IMDb_Top_250[IMDb_Top_250['language'] != 'English']
Top_Foreign_Lang_Film
# -
# ### <NAME> is fourth last
# **Checkpoint 3:** Can you spot `Veer-Zaara` in the dataframe?
# - ### Subtask 3.5: Find the best directors
#
# 1. Group the dataframe using the `director_name` column.
# 2. Find out the top 10 directors for whom the mean of `imdb_score` is the highest and store them in a new dataframe `top10director`. Incase of a tie in IMDb score between two directors, sort them alphabetically.
# +
#Making a working dataframe
Director_movies = movies.copy()
Director_movies.shape
# +
# Write your code for extracting the top 10 directors here
# Mean imdb_score per director, then ONE sort on (score desc, name asc):
# the alphabetical secondary key breaks ties. The original first sorted by
# score alone and then re-sorted with both keys -- the first sort was redundant.
top10director = pd.DataFrame(
    Director_movies.groupby('director_name')['imdb_score'].mean()
)
# -
# 'director_name' is the index here; sort_values accepts index level names in `by`.
top10director = top10director.sort_values(by= ['imdb_score', 'director_name'], ascending= [False, True])
top10director = top10director.iloc[:10, :]
top10director
# **Checkpoint 4:** No surprises that `<NAME>` (director of Whiplash and La La Land) is in this list.
# - ### Subtask 3.6: Find popular genres
#
# You might have noticed the `genres` column in the dataframe with all the genres of the movies seperated by a pipe (`|`). Out of all the movie genres, the first two are most significant for any film.
#
# 1. Extract the first two genres from the `genres` column and store them in two new columns: `genre_1` and `genre_2`. Some of the movies might have only one genre. In such cases, extract the single genre into both the columns, i.e. for such movies the `genre_2` will be the same as `genre_1`.
# 2. Group the dataframe using `genre_1` as the primary column and `genre_2` as the secondary column.
# 3. Find out the 5 most popular combo of genres by finding the mean of the gross values using the `gross` column and store them in a new dataframe named `PopGenre`.
# +
# Making Working dataframe
Popular_genre = movies.copy()
Popular_genre.head()
# -
Popular_genre.tail(10)
# #### We can see row 3075 etc has only one genre
# +
# Write your code for extracting the first two genres of each movie here
# Split the pipe-separated genre string ONCE and reuse the result (the
# original re-split the 'genres' column three separate times).
genre_parts = Popular_genre['genres'].str.split('|')
Popular_genre['genre_1'] = genre_parts.str[0]
Popular_genre['genre_2'] = genre_parts.str[1]
Popular_genre.head()
# -
Popular_genre.tail(10)
# #### Column genre_2 , row 3075 is filled with NaN
# +
# Replacing NaN with genre_1 values (movies that have only one genre).
# Plain assignment instead of inplace fillna on the column selection avoids
# pandas chained-assignment warnings.
Popular_genre['genre_2'] = Popular_genre['genre_2'].fillna(Popular_genre['genre_1'])
Popular_genre.tail(10)
# -
Popular_genre.info()
# +
# Write your code for grouping the dataframe here
# numeric_only=True keeps the historical behaviour (non-numeric columns are
# dropped from the mean) and avoids a TypeError on pandas >= 2.0, where
# DataFrameGroupBy.mean() no longer silently skips string columns.
movies_by_segment = Popular_genre.groupby(['genre_1', 'genre_2']).mean(numeric_only=True)
movies_by_segment.head()
# +
# Write your code for getting the 5 most popular combo of genres here
# Mean gross per (genre_1, genre_2) combo, highest first; keep the top 5.
PopGenre = Popular_genre.groupby(['genre_1', 'genre_2'])['gross'].mean().sort_values(ascending = False)
PopGenre = PopGenre[:5]
PopGenre = pd.DataFrame(PopGenre)
PopGenre
# -
# **Checkpoint 5:** Well, as it turns out. `Family + Sci-Fi` is the most popular combo of genres out there!
# - ### Subtask 3.7: Find the critic-favorite and audience-favorite actors
#
# 1. Create three new dataframes namely, `Meryl_Streep`, `Leo_Caprio`, and `Brad_Pitt` which contain the movies in which the actors: '<NAME>', '<NAME>', and '<NAME>' are the lead actors. Use only the `actor_1_name` column for extraction. Also, make sure that you use the names '<NAME>', '<NAME>', and '<NAME>' for the said extraction.
# 2. Append the rows of all these dataframes and store them in a new dataframe named `Combined`.
# 3. Group the combined dataframe using the `actor_1_name` column.
# 4. Find the mean of the `num_critic_for_reviews` and `num_users_for_review` and identify the actors which have the highest mean.
# 5. Observe the change in number of voted users over decades using a bar chart. Create a column called `decade` which represents the decade to which every movie belongs to. For example, the `title_year` year 1923, 1925 should be stored as 1920s. Sort the dataframe based on the column `decade`, group it by `decade` and find the sum of users voted in each decade. Store this in a new data frame called `df_by_decade`.
# +
# Create working Dataframe
Favourite_actor = movies.copy()
Favourite_actor.head()
# +
# Write your code for creating three new dataframes here
# Include all movies in which Meryl_Streep is the lead
Meryl_Streep = Favourite_actor[Favourite_actor['actor_1_name'] == '<NAME>']
Meryl_Streep.head() #Expected Output
# -
Meryl_Streep.shape
Leo_Caprio = Favourite_actor[Favourite_actor['actor_1_name'] == '<NAME>']
Leo_Caprio.head() #Expected Output
Leo_Caprio.shape
Brad_Pitt = Favourite_actor[Favourite_actor['actor_1_name'] == '<NAME>']
Brad_Pitt.head() #Expected Output
Brad_Pitt.shape
# +
# Write your code for combining the three dataframes here
Combined = pd.concat([Meryl_Streep, Leo_Caprio, Brad_Pitt])
Combined.head() #Expected Output
# -
Combined.shape
# +
# Write your code for grouping the combined dataframe here
# numeric_only=True: pandas >= 2.0 raises TypeError when averaging the
# remaining string columns (movie_title, genres, ...); older pandas silently
# dropped them, which this flag reproduces explicitly.
Combined.groupby(['actor_1_name']).mean(numeric_only=True) #Expected Output
# -
# Write the code for finding the mean of critic reviews here
Critic_Review = Combined.groupby(['actor_1_name'])['num_critic_for_reviews'].mean().sort_values(ascending = False)
Critic_Review #Expected Output
# Write the code for finding the mean of audience reviews here
User_Review = Combined.groupby(['actor_1_name'])['num_user_for_reviews'].mean().sort_values(ascending = False)
User_Review #Expected Output
# #### num_critic_for_reviews = <NAME> = 330.190476
# #### num_user_for_reviews = <NAME> = 914.476190
# **Checkpoint 6:** `Leonardo` has aced both the lists!
# +
# Creating working Dataframe
Decade = movies.copy()
Decade.head()
# -
# Write the code for calculating decade here
# Integer-divide by 10 then multiply back: 1923 -> 1920, 1925 -> 1920.
# NOTE(review): result is numeric (e.g. 1920.0 if title_year is float), not the
# literal string '1920s' the task text mentions -- TODO confirm that is acceptable.
Decade['decade'] = ((Decade['title_year']//10) * 10)
Decade.head() #Expected Output
Decade.sort_values(by = 'decade', inplace = True)
Decade.head()
# Write your code for creating the data frame df_by_decade here
# Total users voted per decade, converted back to a DataFrame with 'decade'
# restored as a regular column for plotting.
df_by_decade = Decade.groupby('decade')['num_voted_users'].sum()
df_by_decade = pd.DataFrame(df_by_decade)
df_by_decade.reset_index(inplace = True)
df_by_decade #Expected Output
# Write your code for plotting number of voted users vs decade
plt.figure(figsize = (10, 8))
sns.barplot(x = 'decade', y = 'num_voted_users' , data = df_by_decade)
plt.xlabel("Decade")
plt.ylabel('User_Votes')
plt.yscale('log')
plt.title("BARPLOT - User Votes vs Decade")
plt.show() #Expected Output
# #### Thank you :-)
| IMDB_Data_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # First test of GIS with geoplot
# In here we try to perform simple operations with an high level package, called `geoplot`
# +
import geoplot as gplt
# %matplotlib inline
# -
# Prepare raw data, in the form of a shapefile converted into geopandas
import geopandas as gpd
#cities = gpd.read_file('data/citiesx010g.shp')
boroughs = gpd.read_file("data/boroughs.geojson", driver='GeoJSON')
#continental_cities = cities[cities['STATE'].map(lambda s: s not in ['PR', 'AK', 'HI', 'VI'])]
#continental_cities = continental_cities[continental_cities['POP_2010'] >= 100000]
boroughs
| geoplot/.ipynb_checkpoints/first_test-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Speed from Position Data
#
# In this Notebook you'll work with data just like the data you'll be using in the final project for this course. That data comes from CSVs that looks like this:
#
# | timestamp | displacement | yaw_rate | acceleration |
# | :-------: | :----------: | :------: | :----------: |
# | 0.0 | 0 | 0.0 | 0.0 |
# | 0.25 | 0.0 | 0.0 | 19.6 |
# | 0.5 | 1.225 | 0.0 | 19.6 |
# | 0.75 | 3.675 | 0.0 | 19.6 |
# | 1.0 | 7.35 | 0.0 | 19.6 |
# | 1.25 | 12.25 | 0.0 | 0.0 |
# | 1.5 | 17.15 | -2.82901631903 | 0.0 |
# | 1.75 | 22.05 | -2.82901631903 | 0.0 |
# | 2.0 | 26.95 | -2.82901631903 | 0.0 |
# | 2.25 | 31.85 | -2.82901631903 | 0.0 |
from helpers import process_data
from matplotlib import pyplot as plt
PARALLEL_PARK_DATA = process_data("parallel_park.pickle")
# +
# This is what the first few entries in the parallel
# park data look like.
PARALLEL_PARK_DATA[:5]
# +
# In this exercise we'll be differentiating (taking the
# derivative of) displacement data. This will require
# using only the first two columns of this data.
# Transpose the row-oriented data once and take each column.
_columns = list(zip(*PARALLEL_PARK_DATA))
timestamps = list(_columns[0])
displacements = list(_columns[1])
# You'll use these data in the next lesson on integration
# You can ignore them for now.
yaw_rates = list(_columns[2])
accelerations = list(_columns[3])
# -
plt.title("Displacement vs Time while Parallel Parking")
plt.xlabel("Time (seconds)")
plt.ylabel("Displacement (meters)")
plt.scatter(timestamps, displacements)
plt.show()
# In the graph above, you can see displacement vs time data for a car as it parallel parks. Note that backwards motion winds back the odometer and reduces displacement (this isn't actually how odometers work on modern cars. Sorry <NAME>)
#
# Note how for approximately 4 seconds the motion is backwards and then for the last two the car goes forwards.
#
# Let's look at some data somewhere in the middle of this trajectory
print(timestamps[20:22])
print(displacements[20:22])
# So you can see that at $t=1.25$ the car has displacement $x=-1.40875$ and at $t=1.3125$ the car has displacement $x=-1.53125$
#
# This means we could calculate the speed / slope as follows:
#
# $$\text{slope} = \frac{\text{vertical change}}{\text{horizontal change}} = \frac{\Delta x}{\Delta t}$$
#
# and for the numbers I just mentioned this would mean:
#
# $$\frac{\Delta x}{\Delta t} = \frac{-1.53125 - -1.40875}{1.3125 - 1.25} = \frac{-0.1225 \text{ meters}}{0.0625\text{ seconds}} = -1.96 \frac{m}{s}$$
#
# So I can say the following:
#
# > Between $t=1.25$ and $t=1.3125$ the vehicle had an **average speed** of **-1.96 meters per second**
#
# I could make this same calculation in code as follows
# +
# Average speed between samples 20 and 21: rise over run.
i, j = 20, 21
delta_x = displacements[j] - displacements[i]
delta_t = timestamps[j] - timestamps[i]
slope = delta_x / delta_t
print(slope)
# -
# Earlier in this lesson you worked with truly continuous functions. In that situation you could make $\Delta t$ as small as you wanted!
#
# But now we have real data, which means the size of $\Delta t$ is dictated by how frequently we made measurements of displacement. In this case it looks like subsequent measurements are separated by
#
# $$\Delta t = 0.0625 \text{ seconds}$$
#
# In the `get_derivative_from_data` function below, I demonstrate how to "take a derivative" of real data. Read through this code and understand how it works: in the next notebook you'll be asked to reproduce this code yourself.
# +
def get_derivative_from_data(position_data, time_data):
    """
    Calculates a list of speeds from position_data and
    time_data.

    Arguments:
      position_data - a list of values corresponding to
        vehicle position

      time_data     - a list of values (equal in length to
        position_data) which give timestamps for each
        position measurement

    Returns:
      speeds - a list of values (which is shorter
        by ONE than the input lists) of speeds.

    Raises:
      ValueError - if the two input lists differ in length.
    """
    # 1. Check to make sure the input lists have same length.
    #    BUGFIX: the original `raise(ValueError, "...")` raises a *tuple*,
    #    which in Python 3 produces "TypeError: exceptions must derive from
    #    BaseException" instead of the intended ValueError.
    if len(position_data) != len(time_data):
        raise ValueError("Data sets must have same length")
    # 2. Prepare empty list of speeds
    speeds = []
    # 3. Get first values for position and time
    previous_position = position_data[0]
    previous_time = time_data[0]
    # 4. Begin loop through all data EXCEPT first entry
    for i in range(1, len(position_data)):
        # 5. get position and time data for this timestamp
        position = position_data[i]
        time = time_data[i]
        # 6. Calculate delta_x and delta_t
        delta_x = position - previous_position
        delta_t = time - previous_time
        # 7. Speed is slope. Calculate it and append to list
        speed = delta_x / delta_t
        speeds.append(speed)
        # 8. Update values for next iteration of the loop.
        previous_position = position
        previous_time = time
    return speeds
# 9. Call this function with appropriate arguments
speeds = get_derivative_from_data(displacements, timestamps)
# 10. Prepare labels for a plot
plt.title("Speed vs Time while Parallel Parking")
plt.xlabel("Time (seconds)")
plt.ylabel("Speed (m / s)")
# 11. Make the plot! Note the slicing of timestamps!
plt.scatter(timestamps[1:], speeds)
plt.show()
# -
# Now that you've read through the code and seen how it's used (and what the resulting plot looks like), I want to discuss the numbered sections of the code.
# 1. The time and position data need to have equal lengths, since each position measurement is meant to correspond to one of those timestamps.
#
# 2. The `speeds` list will eventually be returned at the end of the function.
#
# 3. The use of the word "previous" in these variable names will be clearer in step 8. But basically we need to have TWO positions if we're ever going to calculate a delta X. This is where we grab the first position in the position_data list.
#
# 4. Note that we loop from `range(1, len(position_data))`, which means that the first value for `i` will be `1` and **not** `0`. That's because we already grabbed element 0 in step 3.
#
# 5. Get the data for this `i`.
#
# 6. Calculate the change in position and time.
#
# 7. Find the slope (which is the speed) and append it to the `speeds` list.
#
# 8. This sets the values of `previous_position` and `previous_time` so that they are correct for the *next* iteration of this loop.
#
# 9. Here we call the function with the `displacements` and `timestamps` data that we used before.
#
# 10. Self-explanatory
#
# 11. This part is interesting. Note that we only plot `timestamps[1:]`. This means "every element in `timestamps` except the first one". Remember how in step 4 we looped through every element except the first one? That means that our `speeds` array ends up being 1 element shorter than our original data.
# ## What to Remember
# You don't need to memorize any of this. The important thing to remember is this:
#
# When you're working with real time-series data, you calculate the "derivative" by finding the slope between adjacent data points.
#
# You'll be implementing this on your own in the next notebook. Feel free to come back here if you need help, but try your best to get it on your own.
| vehicle_motion_and_calculus/Speed from Position Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example 8 (TEST): Ionization and Thermal History - Define GW KWARG
# In this part of the notebook, we will show several different examples of how to evaluate the temperature and ionization histories using many of the different options available to `DarkHistory`. The function that solves the differential equations is [*history.tla.get_history()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/history/tla/darkhistory.history.tla.get_history.html), and the set of equations that is solved can be compactly written as follows:
#
# $$ \dot{T}_m = \dot{T}_m^{(0)} + \dot{T}_m^{\text{inj}} + \dot{T}_m^{\text{re}} $$
# # + #\dot{T}_{GW}
#
# $$ \dot{x}_\text{HII} = \dot{x}_\text{HII}^{(0)} + \dot{x}_\text{HII}^{\text{inj}} + \dot{x}_\text{HII}^\text{re} $$
#
# $$ \dot{x}_\text{HeII} = \dot{x}_\text{HeII}^{(0)} + \dot{x}_\text{HeII}^{\text{inj}} + \dot{x}_\text{HeII}^\text{re} $$
#
# with each of the terms defined in Eqs. (2), (5), (51), and (52) of paper I.
# ## Notebook Initialization
# %load_ext autoreload
import sys
sys.path.append("..")
# %matplotlib inline
# +
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
import numpy as np
print(np.__version__)
# -
# Locate the active matplotlibrc configuration file.
# BUGFIX: the literal `>>>` REPL prompts in the original cell are a
# SyntaxError when this jupytext script is executed as plain Python.
import matplotlib
matplotlib.matplotlib_fname()
# +
# %autoreload
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rc_file('matplotlibrc')
import numpy as np
from scipy.interpolate import interp1d
import darkhistory.physics as phys
import darkhistory.history.tla as tla
# -
# ## Standard Ionization and Temperature Histories with Helium
# The most basic use of `get_history` is to obtain the standard ionization and temperature histories with no exotic energy injection sources or reionization. In the simplest approximation, we can neglect helium and simply evaluate the following equations:
#
# $$ \dot{T}_m = \dot{T}_m^{(0)}, \qquad \dot{x}_\text{HII} = \dot{x}_\text{HII}^{(0)}. $$
#
# First, we initialize the redshift abscissa. This must be defined in decreasing redshift. Note that the transfer functions used in calculating $f_c(z)$ only span a redshift range of $4 \leq 1+z \leq 3000$. In this example, we solve the TLA from $1+z = 3000$ to $1+z = 1$.
#
# First, we define a redshift vector at which we would like to find the solution. The vector must be stored in *decreasing* redshift order, and should always be defined as $1+z$.
# Redshift abscissa 1+z, in *decreasing* order as the solver requires:
# from 2999.9 down to 1.0 in steps of 0.1.
rs_vec = np.flipud(np.arange(1., 3000., 0.1))
# Now, we call the solver. If we simply pass the redshift abscissa to the function with no further options, the solution will not have any source of reionization, and helium is ignored. The initial conditions can be manually passed to the solver through the keyword argument `init_cond`, but not initializing it will result in the solver starting at the standard ionization and temperature values at `rs_vec[0]`.
# Baseline history: no reionization, helium ignored.
soln_no_He = tla.get_history(rs_vec)
# If we would like to also solve for the helium ionization fraction, i.e. solve the system
#
# $$ \dot{T}_m = \dot{T}_m^{(0)}, \qquad \dot{x}_\text{HII} = \dot{x}_\text{HII}^{(0)}, \qquad \dot{x}_\text{HeII} = \dot{x}_\text{HeII}^{(0)}, $$
#
# we will need to set the `helium_TLA` flag to `True`. Here is how we call the function:
#
# Same solve, but with the helium TLA equation included.
soln_with_He = tla.get_history(rs_vec, helium_TLA=True)
# The solution is returned as an array with dimensions `(rs_vec.size, 4)`. Each column corresponds to the solution for ($T_m$ in eV, $x_\mathrm{HII}$, $x_\mathrm{HeII}$, $x_\mathrm{HeIII}$). Let's plot temperature in K, and the ionization fraction $x_e = x_\mathrm{HII} + x_\mathrm{HeII}$ as a function of redshift $1+z$. We will plot both solutions for comparison.
# +
Tm = soln_no_He[:,0]/phys.kB
xe_no_He = soln_no_He[:,1]
# The other columns with xHeII and xHeIII are negligible.
xe_with_He = soln_with_He[:,1] + soln_with_He[:,2]
#The last column with xHeIII is negligible
fig_He = plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt_T_IGM, = plt.plot(rs_vec, 20*Tm, label='IGM Temperature')
# Plot the CMB temperature for comparison, using the function phys.TCMB
plt_T_CMB, = plt.plot(rs_vec, phys.TCMB(rs_vec)/phys.kB, 'k--', label='CMB Temperature')
ax.set_xscale('log')
ax.set_yscale('log')
plt.legend(handles=[plt_T_IGM, plt_T_CMB], loc=4)
plt.title(r'\bf{Temperature History}')
plt.xlabel(r'Redshift $(1+z)$')
plt.ylabel('Temperature [K]')
plt.axis([1, 2e3, 1e-2, 1e4])
ax = plt.subplot(1,2,2)
plt_He, = plt.plot(rs_vec, xe_with_He, label=r'$x_e$, with He')
plt_no_He, = plt.plot(rs_vec, xe_no_He, label=r'$x_e$, no He')
plt.legend(handles=[plt_He, plt_no_He], loc=2)
# ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\bf{Ionization History}')
plt.xlabel(r'Redshift $(1+z)$')
plt.ylabel(r'Free Electron Fraction $x_e$')
plt.axis([1, 3e3, 1e-4, 2.7])
from matplotlib.transforms import blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
transform = blended_transform_factory(fig_He.transFigure, ax.transAxes)
axins = inset_axes(ax, width="22%", height="50%",
bbox_to_anchor=(0, 0.1, 0.885, 0.885),
bbox_transform=transform, loc=4, borderpad=0)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.plot(rs_vec, xe_with_He)
plt.plot(rs_vec, xe_no_He)
plt.title(r'\bf{Helium Recombination}', fontsize=18, y = 0.95)
plt.yticks([0.96, 1., 1.04, 1.08, 1.12])
plt.xticks([1600, 2000, 2400, 2800])
plt.axis([1.4e3, 3e3, 0.95, 1.13])
# -
print(Tm.size)
# A few technical details to note. Ionization levels are solved numerically using the variable
#
# $$\zeta_i \equiv \text{arctanh} \left[\frac{2}{\chi_i}\left(x_i - \frac{\chi_i}{2} \right) \right]$$
#
# for $i = $ HII, HeII and HeIII, with $\chi_i=1$ for HII and $\chi_i = \mathcal{F}_\text{He}$ for HeII and HeIII. This guarantees that the ionization fraction $x_e$ is bounded between 0 and 1. As a result, it is common to find that $x_i$ values may not be completely zero when they are expected to be, but instead have some small, non-zero value. This is simply a negligible numerical error that can be ignored. Ionization levels below $10^{-12}$ should not be regarded as accurate.
# ## Reionization
# ### Default Model
# The solver can also accept a reionization model. A default reionization model is included with `DarkHistory`. This model combines the fiducial photoionization and photoheating rates provided in [[1]](#cite_rates1) with rates for collisional ionization, collisional excitation, recombination cooling and bremsstrahlung cooling given in [[2]](#cite_rates2), in order to obtain the ionization/temperature change during reionization (for more details, refer to the paper). The photoionization and photoheating data for the model are stored in [*history.reionization*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/history/darkhistory.history.reionization.html).
#
# Reionization models must come with a redshift at which they turn on, $1+z_\text{re}$ (the default model has $1+z_\text{re} = 16.1$, and is set automatically by the code). Prior to reionization, the solver will integrate the usual set of equations without reionization:
#
# $$ \dot{T}_m = \dot{T}_m^{(0)}, \qquad \dot{x}_\text{HII} = \dot{x}_\text{HII}^{(0)}, \qquad \dot{x}_\text{HeII} = \dot{x}_\text{HeII}^{(0)}, $$
#
# Once reionization starts, it will instead integrate
#
# $$ \dot{T}_m = \dot{T}_m^{(0)} + \dot{T}_m^\text{re}, \qquad \dot{x}_\text{HII} = \dot{x}_\text{HII}^\text{re}, \qquad \dot{x}_\text{HeII} = \dot{x}_\text{HeII}^\text{re}, $$
#
# To obtain the temperature/ionization history with the default reionization model, simply specify `reion_switch = True`. In the default case, the solver knows when to switch to the reionization equations. For other models, the user must also specify the `reion_rs` flag. In this example, we will also solve for helium as well, which is the self-consistent thing to do here.
soln_default_reion = tla.get_history(rs_vec, helium_TLA=True, reion_switch=True)
# And now plot the results. These results should be compared to Fig. 6 of [[1]](#cite_rates1) and are in good agreement.
# +
fig_reion=plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt_Tm_reion, = plt.plot(rs_vec, soln_default_reion[:,0]/phys.kB, label=r'Matter Temperature $T_m$')
# Text
plt.text(0.06, 0.88, 'Reionization (Puchwein+)', fontsize=20, transform=ax.transAxes)
leg = plt.legend(handles=[plt_Tm_reion], loc=3)
plt.title(r'\bf{Temperature History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel(r'Temperature [K]')
plt.axis([1, 17.5, 0, 20000])
ax = plt.subplot(1,2,2)
plt_xHII, = plt.plot(rs_vec, soln_default_reion[:,1], linewidth=2, label=r'$n_\mathrm{HII}/n_\mathrm{H}$')
plt_xHeII, = plt.plot(rs_vec, soln_default_reion[:,2]/phys.chi, linewidth=2, label=r'$n_\mathrm{HeII}/n_\mathrm{He}$')
# The solution is stored as nHeII/nH, so it needs to be converted to nHeII/nHe.
plt_xHeIII, = plt.plot(rs_vec, soln_default_reion[:,3]/phys.chi, linewidth=2, label=r'$n_\mathrm{HeIII}/n_\mathrm{He}$')
# Same for HeIII.
# Text
plt.text(0.06, 0.88, 'Reionization (Puchwein+)', fontsize=20, transform=ax.transAxes)
leg = plt.legend(handles=[plt_xHII, plt_xHeII, plt_xHeIII], loc=5)
plt.title(r'\bf{Ionization History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Ionization Fraction')
plt.axis([1, 17.5, 0, 1.2])
# -
# ### User-Defined Model
# The user may enter their own photoionization and photoheating rates into the TLA solver. This is done by defining two tuples of functions, each containing the photoionization and photoheating rates respectively of HI, HeI and HeII. Here, we define functions `photoion_rate` and `photoheat_rate` to return these tuples, given some new photoionization and photoheating rates. In this example, we use the rates from [[3]](#cite_rates3).
# Now we call the solver. In addition to setting `reion_switch=True`, we must also specify the redshift `reion_rs` at which reionization effects start, as well as the two tuples for `photoion_rate_func` and `photoheat_rate_func` respectively.
# BUGFIX: reordered so `soln_user_reion` is defined BEFORE it is plotted --
# the plot statement originally came first and would raise NameError on a
# clean top-to-bottom run.
# NOTE(review): `gwrate`, `photoion_rate` and `photoheat_rate` are defined
# *later* in this file, so those cells must be executed before this one --
# TODO confirm the intended cell execution order.
soln_user_reion = tla.get_history(
    rs_vec, helium_TLA=True, reion_switch=True,
    reion_rs = 16.1, GWrate_func=gwrate(), photoion_rate_func=photoion_rate(), photoheat_rate_func=photoheat_rate(),
)
plt_xHII_Haardt, = plt.plot(
    rs_vec, soln_user_reion[:,1],
    label=r'$n_\mathrm{HII}/n_\mathrm{H}$, Haardt+'
)
# Plot the results, and compare them to the our default model. Again, this shows good agreement with Fig. 6 of [[1]](#cite_rates1).
# +
def photoion_rate():
    """Return user-defined photoionization rates for HI, HeI and HeII.

    Returns
    -------
    tuple of callables ``(HI, HeI, HeII)``
        Each maps redshift ``rs = 1+z`` to a photoionization rate, obtained
        by interpolating the tabulated rates linearly in ``rs`` and in
        log10 of the rate.  Outside the tabulated range (1.00 <= rs <= 16.10)
        ``np.interp`` clamps to the endpoint values.
    """
    # Tabulated redshifts (1+z) at which the rates below are given.
    rs_vec = 1. + np.array([
        0.00, 0.05, 0.10, 0.16, 0.21, 0.27, 0.33, 0.40, 0.47,
        0.54, 0.62, 0.69, 0.78, 0.87, 0.96, 1.05, 1.15, 1.26,
        1.37, 1.49, 1.61, 1.74, 1.87, 2.01, 2.16, 2.32, 2.48,
        2.65, 2.83, 3.02, 3.21, 3.42, 3.64, 3.87, 4.11, 4.36,
        4.62, 4.89, 5.18, 5.49, 5.81, 6.14, 6.49, 6.86, 7.25,
        7.65, 8.07, 8.52, 8.99, 9.48, 9.99, 10.50, 11.10, 11.70,
        12.30, 13.00, 13.70, 14.40, 15.10
    ])
    # Tabulated photoionization rates for each species.
    rate_vec_HI = np.array([
        22.8, 28.4, 35.4, 44.0, 54.6, 67.4, 83.1, 102, 125,
        152, 185, 223, 267, 318, 376, 440, 510, 585, 660, 732,
        799, 859, 909, 944, 963, 965, 950, 919, 875, 822, 765,
        705, 647, 594, 546, 504, 469, 441, 412, 360, 293, 230,
        175, 129, 92.8, 65.5, 45.6, 31.2, 21.2, 14.3, 9.59,
        6.40, 4.27, 2.92, 1.73, 1.02, 0.592, 0.341, 0.194
    ]) * 1e-15
    rate_vec_HeI = np.array([
        12.4, 15.7, 19.6, 24.6, 30.7, 38.3, 47.5, 58.7, 72.2,
        88.4, 108, 130, 157, 187, 222, 261, 302, 346, 391,
        434, 474, 509, 538, 557, 567, 566, 555, 535, 508,
        476, 441, 406, 372, 341, 314, 291, 271, 253, 237, 214,
        184, 154, 125, 99.2, 76.1, 56.8, 41.4, 29.6, 20.7,
        14.4, 9.82, 6.67, 4.53, 3.24, 2.02, 1.23, 0.746, 0.446,
        0.262
    ]) * 1e-15
    rate_vec_HeII = np.array([
        0.555, 0.676, 0.823, 1.00, 1.22, 1.48, 1.80, 2.18,
        2.63, 3.17, 3.80, 4.54, 5.38, 6.33, 7.38, 8.52,
        9.70, 10.9, 11.9, 12.7, 13.2, 13.4, 13.3, 12.8,
        11.9, 10.6, 9.04, 7.22, 5.30, 3.51, 2.08, 1.14,
        0.591, 0.302, 0.152, 0.0760, 0.0375, 0.0182,
        0.00857, 0.00323, 0.00117, 4.42e-4, 1.73e-4,
        7.01e-5, 2.92e-5, 1.25e-5, 5.67e-6, 2.74e-6, 1.44e-6,
        8.19e-7, 4.99e-7, 3.25e-7, 2.12e-7, 1.43e-7, 9.84e-8,
        6.81e-8, 4.73e-8, 3.30e-8, 1.92e-8
    ]) * 1e-15
    def _log_interp(rate_vec):
        # Shared factory for all three species: the three previous nested
        # functions were identical except for the table they interpolated.
        def rate(rs):
            return 10**np.interp(rs, rs_vec, np.log10(rate_vec))
        return rate
    return (_log_interp(rate_vec_HI),
            _log_interp(rate_vec_HeI),
            _log_interp(rate_vec_HeII))
def photoheat_rate():
    """Return user-defined photoheating rates for HI, HeI and HeII.

    Returns
    -------
    tuple of callables ``(HI, HeI, HeII)``
        Each maps redshift ``rs = 1+z`` to a photoheating rate, obtained by
        interpolating the tabulated rates linearly in ``rs`` and in log10
        of the rate.  Outside the tabulated range (1.00 <= rs <= 16.10)
        ``np.interp`` clamps to the endpoint values.
    """
    # Tabulated redshifts (1+z) at which the rates below are given.
    rs_vec = 1. + np.array([
        0.00, 0.05, 0.10, 0.16, 0.21, 0.27, 0.33, 0.40, 0.47,
        0.54, 0.62, 0.69, 0.78, 0.87, 0.96, 1.05, 1.15, 1.26,
        1.37, 1.49, 1.61, 1.74, 1.87, 2.01, 2.16, 2.32, 2.48,
        2.65, 2.83, 3.02, 3.21, 3.42, 3.64, 3.87, 4.11, 4.36,
        4.62, 4.89, 5.18, 5.49, 5.81, 6.14, 6.49, 6.86, 7.25,
        7.65, 8.07, 8.52, 8.99, 9.48, 9.99, 10.50, 11.10, 11.70,
        12.30, 13.00, 13.70, 14.40, 15.10
    ])
    # Tabulated photoheating rates for each species.
    rate_vec_HI = np.array([
        8.89, 11.1, 13.9, 17.3, 21.5, 26.6, 32.9, 40.5, 49.6,
        60.5, 73.4, 88.5, 106, 126, 149, 175, 203, 232, 262,
        290, 317, 341, 360, 374, 381, 382, 375, 363, 346, 325,
        302, 279, 257, 236, 218, 202, 189, 178, 167, 148, 123,
        98.9, 77.1, 58.3, 43.0, 31.0, 21.9, 15.3, 10.5, 7.13,
        4.81, 3.23, 2.17, 1.51, 0.915, 0.546, 0.323, 0.189, 0.110
    ]) * 1e-14
    rate_vec_HeI = np.array([
        11.2, 14.0, 17.4, 21.6, 26.7, 33.1, 40.8, 50.2, 61.5,
        75.1, 91.1, 110, 132, 157, 186, 217, 251, 287, 323,
        357, 387, 413, 432, 444, 446, 438, 422, 398, 368, 336,
        304, 274, 249, 227, 209, 194, 181, 170, 160, 146, 130,
        112, 95.2, 78.3, 62.5, 48.3, 36.3, 26.6, 19.1, 13.4,
        9.27, 6.36, 4.35, 3.14, 1.98, 1.22, 0.749, 0.455, 0.270
    ]) * 1e-14
    rate_vec_HeII = np.array([
        1.14, 1.38, 1.68, 2.03, 2.45, 2.96, 3.57, 4.29, 5.14,
        6.15, 7.32, 8.67, 10.2, 11.9, 13.9, 15.9, 18.1, 20.2,
        22.1, 23.7, 24.7, 25.3, 25.2, 24.4, 22.9, 20.7, 17.8,
        14.5, 11.1, 7.75, 4.97, 2.96, 1.68, 0.925, 0.501, 0.267,
        0.141, 0.0727, 0.0365, 0.0156, 0.00624, 0.00269, 0.00128,
        6.74e-4, 3.88e-4, 2.40e-4, 1.55e-4, 1.03e-4, 6.98e-5,
        4.76e-5, 3.26e-5, 2.24e-5, 1.53e-5, 1.06e-5, 7.52e-6,
        5.31e-6, 3.73e-6, 2.57e-6, 1.54e-6
    ]) * 1e-14
    def _log_interp(rate_vec):
        # Shared factory for all three species: the three previous nested
        # functions were identical except for the table they interpolated.
        def rate(rs):
            return 10**np.interp(rs, rs_vec, np.log10(rate_vec))
        return rate
    return (_log_interp(rate_vec_HI),
            _log_interp(rate_vec_HeI),
            _log_interp(rate_vec_HeII))
# +
from numpy.random import seed
from numpy.random import rand
# Fix the global numpy RNG so the draws below are reproducible.
seed(1)
# 59 uniform draws -- matches the length of the rate tables above.
# NOTE(review): `values` is not used anywhere in this chunk; confirm intent.
values = rand(59)
# +
def gwrate(boost=200.):
    """Return a heating-rate function for the solver's ``GWrate_func`` argument.

    Parameters
    ----------
    boost : float, optional
        Overall multiplicative factor applied to the tabulated rate.
        Defaults to 200, the factor previously hard-coded inside the
        returned function, so ``gwrate()`` is backward-compatible.

    Returns
    -------
    callable
        Maps redshift ``rs = 1+z`` to ``boost`` times the tabulated rate,
        interpolated linearly in ``rs`` and in log10 of the rate; clamped
        to the endpoints outside the tabulated range.

    NOTE(review): the rate table below is byte-identical to the HeII table
    in ``photoheat_rate`` -- presumably a placeholder; confirm the intended
    rates.
    """
    # Tabulated redshifts (1+z) at which the rates below are given.
    rs_vec = 1. + np.array([
        0.00, 0.05, 0.10, 0.16, 0.21, 0.27, 0.33, 0.40, 0.47,
        0.54, 0.62, 0.69, 0.78, 0.87, 0.96, 1.05, 1.15, 1.26,
        1.37, 1.49, 1.61, 1.74, 1.87, 2.01, 2.16, 2.32, 2.48,
        2.65, 2.83, 3.02, 3.21, 3.42, 3.64, 3.87, 4.11, 4.36,
        4.62, 4.89, 5.18, 5.49, 5.81, 6.14, 6.49, 6.86, 7.25,
        7.65, 8.07, 8.52, 8.99, 9.48, 9.99, 10.50, 11.10, 11.70,
        12.30, 13.00, 13.70, 14.40, 15.10
    ])
    rate_vec_gw = np.array([
        1.14, 1.38, 1.68, 2.03, 2.45, 2.96, 3.57, 4.29, 5.14,
        6.15, 7.32, 8.67, 10.2, 11.9, 13.9, 15.9, 18.1, 20.2,
        22.1, 23.7, 24.7, 25.3, 25.2, 24.4, 22.9, 20.7, 17.8,
        14.5, 11.1, 7.75, 4.97, 2.96, 1.68, 0.925, 0.501, 0.267,
        0.141, 0.0727, 0.0365, 0.0156, 0.00624, 0.00269, 0.00128,
        6.74e-4, 3.88e-4, 2.40e-4, 1.55e-4, 1.03e-4, 6.98e-5,
        4.76e-5, 3.26e-5, 2.24e-5, 1.53e-5, 1.06e-5, 7.52e-6,
        5.31e-6, 3.73e-6, 2.57e-6, 1.54e-6
    ]) * 1e-14
    def GW(rs):
        # Log-linear interpolation of the table, scaled by `boost`.
        return boost * 10**np.interp(rs, rs_vec, np.log10(rate_vec_gw))
    return GW
# +
def photoheat_rate2(boost=1.5):
    """Photoheating rates as in ``photoheat_rate``, uniformly scaled.

    Parameters
    ----------
    boost : float, optional
        Overall multiplicative factor applied to every rate.  Defaults to
        1.5, the factor previously hard-coded in each nested rate function,
        so ``photoheat_rate2()`` is backward-compatible.

    Returns
    -------
    tuple of callables ``(HI, HeI, HeII)``
        Each maps redshift ``rs = 1+z`` to ``boost`` times the tabulated
        photoheating rate, interpolated linearly in ``rs`` and in log10
        of the rate; clamped to the endpoints outside the tabulated range.
    """
    # Tabulated redshifts (1+z); same grid as photoheat_rate.
    rs_vec = 1. + np.array([
        0.00, 0.05, 0.10, 0.16, 0.21, 0.27, 0.33, 0.40, 0.47,
        0.54, 0.62, 0.69, 0.78, 0.87, 0.96, 1.05, 1.15, 1.26,
        1.37, 1.49, 1.61, 1.74, 1.87, 2.01, 2.16, 2.32, 2.48,
        2.65, 2.83, 3.02, 3.21, 3.42, 3.64, 3.87, 4.11, 4.36,
        4.62, 4.89, 5.18, 5.49, 5.81, 6.14, 6.49, 6.86, 7.25,
        7.65, 8.07, 8.52, 8.99, 9.48, 9.99, 10.50, 11.10, 11.70,
        12.30, 13.00, 13.70, 14.40, 15.10
    ])
    rate_vec_HI = np.array([
        8.89, 11.1, 13.9, 17.3, 21.5, 26.6, 32.9, 40.5, 49.6,
        60.5, 73.4, 88.5, 106, 126, 149, 175, 203, 232, 262,
        290, 317, 341, 360, 374, 381, 382, 375, 363, 346, 325,
        302, 279, 257, 236, 218, 202, 189, 178, 167, 148, 123,
        98.9, 77.1, 58.3, 43.0, 31.0, 21.9, 15.3, 10.5, 7.13,
        4.81, 3.23, 2.17, 1.51, 0.915, 0.546, 0.323, 0.189, 0.110
    ]) * 1e-14
    rate_vec_HeI = np.array([
        11.2, 14.0, 17.4, 21.6, 26.7, 33.1, 40.8, 50.2, 61.5,
        75.1, 91.1, 110, 132, 157, 186, 217, 251, 287, 323,
        357, 387, 413, 432, 444, 446, 438, 422, 398, 368, 336,
        304, 274, 249, 227, 209, 194, 181, 170, 160, 146, 130,
        112, 95.2, 78.3, 62.5, 48.3, 36.3, 26.6, 19.1, 13.4,
        9.27, 6.36, 4.35, 3.14, 1.98, 1.22, 0.749, 0.455, 0.270
    ]) * 1e-14
    rate_vec_HeII = np.array([
        1.14, 1.38, 1.68, 2.03, 2.45, 2.96, 3.57, 4.29, 5.14,
        6.15, 7.32, 8.67, 10.2, 11.9, 13.9, 15.9, 18.1, 20.2,
        22.1, 23.7, 24.7, 25.3, 25.2, 24.4, 22.9, 20.7, 17.8,
        14.5, 11.1, 7.75, 4.97, 2.96, 1.68, 0.925, 0.501, 0.267,
        0.141, 0.0727, 0.0365, 0.0156, 0.00624, 0.00269, 0.00128,
        6.74e-4, 3.88e-4, 2.40e-4, 1.55e-4, 1.03e-4, 6.98e-5,
        4.76e-5, 3.26e-5, 2.24e-5, 1.53e-5, 1.06e-5, 7.52e-6,
        5.31e-6, 3.73e-6, 2.57e-6, 1.54e-6
    ]) * 1e-14
    def _scaled_log_interp(rate_vec):
        # Shared factory: log-linear interpolation of `rate_vec`, scaled by `boost`.
        def rate(rs):
            return boost * 10**np.interp(rs, rs_vec, np.log10(rate_vec))
        return rate
    return (_scaled_log_interp(rate_vec_HI),
            _scaled_log_interp(rate_vec_HeI),
            _scaled_log_interp(rate_vec_HeII))
# -
# Solve the TLA with the user-defined photoionization rates and the boosted
# photoheating rates from photoheat_rate2().
soln_shira_reion = tla.get_history(
    rs_vec, helium_TLA=True, reion_switch=True,
    reion_rs = 16.1, photoion_rate_func=photoion_rate(), photoheat_rate_func=photoheat_rate2()
)
# Inspect the shape of the earlier user-defined solution (rows: redshift steps).
soln_user_reion.shape
# +
# Compare the default (Puchwein+) and user-defined (Haardt+) reionization models.
plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt.rc('text', usetex=True)
# Temperatures are stored in energy units; divide by phys.kB to get Kelvin.
plt_Tm_Puchwein, = plt.plot(rs_vec, soln_default_reion[:,0]/phys.kB, ':', label='Matter Temperature, Puchwein+')
plt_Tm_Haardt, = plt.plot(rs_vec, soln_user_reion[:,0]/phys.kB, label='Matter Temperature, Haardt+')
plt.legend(handles=[plt_Tm_Puchwein, plt_Tm_Haardt])
plt.title(r'\bf{Temperature History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Temperature [K]')
plt.axis([1, 17.5, 0, 20000])
# Right panel: HII and HeIII fractions for both models.
ax = plt.subplot(1,2,2)
plt_xHII_Puchwein, = plt.plot(
    rs_vec, soln_default_reion[:,1], ':',
    label=r'$n_\mathrm{HII}/n_\mathrm{H}$, Puchwein+'
)
plt_xHII_Haardt, = plt.plot(
    rs_vec, soln_user_reion[:,1],
    label=r'$n_\mathrm{HII}/n_\mathrm{H}$, Haardt+'
)
plt_xHeIII_Puchwein, = plt.plot(
    rs_vec, soln_default_reion[:,3]/phys.chi, ':',
    label=r'$n_\mathrm{HeIII}/n_\mathrm{He}$, Puchwein+'
)
plt_xHeIII_Haardt, = plt.plot(
    rs_vec, soln_user_reion[:,3]/phys.chi,
    label=r'$n_\mathrm{HeIII}/n_\mathrm{He}$, Haardt+'
)
plt.legend(handles=[plt_xHII_Puchwein, plt_xHII_Haardt, plt_xHeIII_Puchwein, plt_xHeIII_Haardt], loc=1)
plt.title(r'\bf{Ionization History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Ionization Fraction')
plt.axis([1, 17.5, 0, 1.55])
# +
# Temperature history for the boosted-heating (Shira+) model vs the default.
plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt.rc('text', usetex=True)
plt_Tm_Puchwein, = plt.plot(rs_vec, soln_default_reion[:,0]/phys.kB, ':', label='Matter Temperature, Puchwein+')
plt_Tm_Shira, = plt.plot(rs_vec, soln_shira_reion[:,0]/phys.kB, label='Matter Temperature, Shira+')
plt.legend(handles=[plt_Tm_Puchwein, plt_Tm_Shira])
plt.title(r'\bf{Temperature History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Temperature [K]')
plt.axis([1, 17.5, 0, 200000])
# +
# All three models on one set of axes.
plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt.rc('text', usetex=True)
plt_Tm_Puchwein, = plt.plot(rs_vec, soln_default_reion[:,0]/phys.kB, ':', label='Matter Temperature, Puchwein+')
plt_Tm_Haardt, = plt.plot(rs_vec, soln_user_reion[:,0]/phys.kB, label='Matter Temperature, Haardt+')
plt_Tm_Shira, = plt.plot(rs_vec, soln_shira_reion[:,0]/phys.kB, label='Matter Temperature, Shira+')
plt.legend(handles=[plt_Tm_Puchwein, plt_Tm_Haardt, plt_Tm_Shira])
plt.title(r'\bf{Temperature History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Temperature [K]')
plt.axis([1, 17.5, 0, 25000])
# -
# ### Fixed Ionization History
# A less self-consistent way to model reionization is to fix the ionization history once reionization begins. When combined with exotic sources of energy injection, this treatment will not account for additional ionization from the energy injection, while the energy deposition into heating will be calculated based only on the fixed ionization history that has been specified. However, if we expect the additional ionization from energy injection to only be a small perturbation on top of reionization, this may be a good approximation.
#
# We will now take the standard $\text{tanh}$ model that is used in [[4]](#cite_tanh), with
#
# $$ x_e = \frac{1+\mathcal{F}_\text{He}}{2} \left[1 + \tanh \left( \frac{ \eta - \eta_\text{re}}{\delta \eta} \right) \right] . $$
#
# $\eta \equiv (1+z)^{3/2}$, and we take $\delta \eta = 1.5 (1+z)^{1/2} \, \delta z$ with $\delta z = 0.7$, i.e. $\delta \eta = 1.05 (1+z)^{1/2}$. We choose $z_\text{re} = 8.8$.
#
# For simplicity, `DarkHistory` always assumes $n_\text{HII} = n_\text{HeII}$, a standard assumption in many reionization models. Changing this assumption would only require a relatively straightforward but messy modification to [*history.tla.get_history()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/darkhistory/history/tla/darkhistory.history.tla.get_history.html). We do not implement the full reionization of HeII to HeIII.
#
# We first define the model as a function that takes in redshift $1+z$ and returns the $x_e$ value:
# +
# Define the standard ionization history as an interpolation function for convenience.
def xe_reion_func(rs):
    """Standard tanh reionization model: free-electron fraction at rs = 1+z.

    Implements x_e = (1 + chi)/2 * [1 + tanh((eta_re - eta)/delta_eta)]
    with eta = (1+z)^{3/2}, z_re = 8.8 and delta_eta = 1.5 (1+z)^{1/2} * 0.7.
    """
    z_re = 8.8                       # midpoint redshift of reionization
    delta_z = 0.7
    # Width of the transition in eta-space.
    delta_eta = 1.5 * rs**0.5 * delta_z
    eta = rs**(3/2)
    eta_re = (1 + z_re)**(3/2)
    # Full ionization contributes (1 + chi) electrons per hydrogen atom.
    amplitude = (1. + phys.chi) / 2
    return amplitude * (1 + np.tanh((eta_re - eta) / delta_eta))
# -
# Then, we pass it to the solver through the flag `xe_reion_func`. Remember to also specify `reion_rs`, which we take to be $1+z = 13$ here. In order to try to give smooth solutions, the switchover point into the reionization model is *not* given strictly by `reion_rs`, but by the largest redshift where `xe_reion_func` first returns a value of $x_e$ that exceeds the solution without reionization.
# Solve the TLA with the ionization history fixed to the tanh model above.
soln_fixed_reion = tla.get_history(
    rs_vec, reion_switch=True, helium_TLA=True,
    reion_rs = 13., xe_reion_func = xe_reion_func
)
# And the plot! Note that this treatment completely neglects the photoheating contribution to the IGM temperature, leading to a very cold IGM compared to the earlier, more complete reionization models. This may however still be useful for setting lower bounds.
# +
plt.figure(figsize=(15,6.5))
# Left panel: matter temperature for the fixed (tanh) ionization history.
ax = plt.subplot(1,2,1)
plt_TIGM, = plt.plot(rs_vec, soln_fixed_reion[:,0]/phys.kB, label=r'Matter Temperature $T_m$')
# Text
plt.text(0.06, 0.88, 'Reionization (tanh)', fontsize=20, transform=ax.transAxes)
plt.axis([1, 18.5, 1, 12.5])
plt.title(r'\bf{Temperature History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Temperature [K]')
leg=plt.legend(handles=[plt_TIGM], loc=4)
# Right panel: HII, HeII and total electron fractions (all relative to n_H).
ax = plt.subplot(1,2,2)
plt_xHII, = plt.plot(rs_vec, soln_fixed_reion[:,1], label=r'$n_\mathrm{HII}/n_\mathrm{H}$')
plt_xHeII, = plt.plot(rs_vec, soln_fixed_reion[:,2], label=r'$n_\mathrm{HeII}/n_\mathrm{H}$')
plt_xe, = plt.plot(rs_vec, soln_fixed_reion[:,1] + soln_fixed_reion[:,2], label=r'$n_e/n_\mathrm{H}$')
# Text
plt.text(0.06, 0.88, 'Reionization (tanh)', fontsize=20, transform=ax.transAxes)
leg=plt.legend(handles=[plt_xHII, plt_xHeII, plt_xe], loc=5)
plt.axis([1, 18.5, -0.1, 1.4])
plt.title(r'\bf{Ionization History}')
plt.xlabel(r'Redshift $(1+z)$')
plt.ylabel(r'Ionization Fraction')
# -
# ## Reionization + Dark Matter
# ### Dark Matter
# Now, we will introduce dark matter into the TLA equations. We introduce a dark matter species with mass $m_\chi = $ 100 MeV that decays with some (long) lifetime into an $e^+e^-$ pair. The energy injection rate is:
#
# $$ \left( \frac{dE}{dV \, dt} \right)_{\text{inj.}} = \frac{\rho_{\text{DM},0}}{\tau} (1 + z)^3 $$
#
# where $\tau$ is the lifetime, and $\rho_{\text{DM},0}$ is the dark matter density today.
#
# In this example, we will simply use the $f_c(z)$ computed in DarkHistory without backreaction. We call these $f_c(z)$ values the "baseline $f_c(z)$" values. *If the user only intends to use these baseline $f_c(z)$ values, then `get_history()` alone is sufficient for all purposes*. The rest of `DarkHistory` is focused on correctly computing $f_c(z,\mathbf{x})$ to ensure that backreaction is taken into account.
#
# There are two ways to solve the TLA with DM injection using the baseline $f_c(z)$, i.e. without backreaction. The user can call `main.evolve()` with `backreaction = False` or, if the only information they require is the matter temperature and ionization levels, they can directly call `get_history()` with `baseline_f=True`. These $f_c(z)$ values were calculated by Darkhistory and are provided in the downloaded data folder. `get_history()` is also much faster, since it does not calculate spectra.
# Once we specify the DM parameters, we can now call the solver in the manner shown below (we turn off helium for this example). We set $m_\chi = 100 $ MeV and $\tau = 3 \times 10^{25}$s. With dark matter, the redshift cannot go below $1+z = 4$, where the `DarkHistory` calculation of $f_c(z)$ stops.
# +
# %autoreload
# Redshift grid running downward from 1+z = 3000 to 4 (the solver integrates
# forward in time, i.e. toward decreasing rs).
rs_vec_DM = np.flipud(np.arange(4, 3000., 0.1))
# Decaying DM (chi -> e+e-), mDM = 1e8 eV = 100 MeV, lifetime 3e25 s, using
# the baseline f_c(z); no reionization or helium for this example.
soln_DM = tla.get_history(
    rs_vec_DM, baseline_f = True,
    inj_particle = 'elec', DM_process = 'decay',
    mDM=1e8, lifetime = 3e25,
    reion_switch=False,
    helium_TLA=False
)
# -
# And this is the outcome of the computation. We will compare these results with the standard solution with no dark matter energy injection or reionization, which is accessible through `physics.xH_std` and `physics.Tm_std`.
# +
# Convert temperature column to Kelvin; pull out the HII fraction.
Tm_DM = soln_DM[:,0]/phys.kB
xHII_DM = soln_DM[:,1]
fig_DM = plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt_T_DM, = plt.plot(rs_vec_DM, Tm_DM, label=r'Matter Temperature $T_m$, with DM')
plt_T_std, = plt.plot(rs_vec_DM, phys.Tm_std(rs_vec_DM)/phys.kB, label=r'Matter Temperature $T_m$, no DM')
# Plot the CMB temperature for comparison, using the function phys.TCMB
plt_T_CMB, = plt.plot(rs_vec_DM, phys.TCMB(rs_vec_DM)/phys.kB, 'k--', label=r'CMB Temperature $T_\mathrm{CMB}$')
# Text
plt.text(0.06, 0.88, r'$\chi \to e^+e^-$, $m_\chi$ = 100 MeV', fontsize=20, transform=ax.transAxes)
plt.text(0.06, 0.82, r'$\tau = 3 \times 10^{25}$ s', fontsize=20, transform=ax.transAxes)
ax.set_xscale('log')
ax.set_yscale('log')
leg = plt.legend(handles=[plt_T_DM, plt_T_std, plt_T_CMB], loc=4)
plt.title(r'\bf{Temperature History}')
plt.xlabel(r'Redshift $(1+z)$')
plt.ylabel('Temperature [K]')
plt.axis([4, 3e3, 1e-1, 3e4])
# Right panel: ionization with and without DM energy injection.
ax = plt.subplot(1,2,2)
plt_xHII_DM, = plt.plot(rs_vec_DM, xHII_DM, label=r'$n_\mathrm{HII}/n_\mathrm{H}$, with DM')
plt_xHII_std, = plt.plot(rs_vec_DM, phys.xHII_std(rs_vec_DM), label=r'$n_\mathrm{HII}/n_\mathrm{H}$, no DM')
# Text
plt.text(0.06, 0.88, r'$\chi \to e^+e^-$, $m_\chi$ = 100 MeV', fontsize=20, transform=ax.transAxes)
plt.text(0.06, 0.82, r'$\tau = 3 \times 10^{25}$ s', fontsize=20, transform=ax.transAxes)
leg = plt.legend(handles=[plt_xHII_DM, plt_xHII_std], loc=4)
ax.set_xscale('log')
ax.set_yscale('log')
plt.title(r'\bf{Ionization History}')
plt.xlabel(r'Redshift $(1+z)$')
plt.ylabel(r'Ionization Fraction $x_\mathrm{HII}$')
plt.axis([4, 3e3, 1e-5, 3])
# -
# ### Dark Matter and Reionization
# Now it's time to combine dark matter energy injection with reionization! This is easily done by turning `reion_switch` to `True` to use the default reionization model. Again, for comparison, we provide the solution without and with backreaction turned on.
# Same DM decay model as above, now with the default reionization model and
# helium evolution switched on.
soln_DM_reion = tla.get_history(
    rs_vec_DM, baseline_f = True,
    inj_particle = 'elec', DM_process = 'decay',
    mDM=1e8, lifetime = 3e25,
    reion_switch=True,
    helium_TLA=True
)
# To calculate the result with backreaction, [*main.evolve()*](https://darkhistory.readthedocs.io/en/latest/_autosummary/main/main.evolve.html) must be used, with `backreaction = True`. For the case of $\chi \to e^+e^-$ without any electroweak corrections, `primary` must be set to `elec_delta`. Since we are importing `main` for the first time, the transfer functions will be loaded. We'll compute both the results without reionization `soln_DM_BR` and `soln_DM_reion_BR`. Reionization is turned on by setting `reion_switch = True`, and uses the default reionization model. `helium_TLA` should be set to `True` as well for consistency.
#
# Without reionization, `coarsen_factor = 32` is a reasonable choice, since the temperature changes slowly with redshift. Coarsening with reionization should be used with caution, due to rapid changes in temperature that should be integrated over small step sizes.
# +
# %autoreload 2
import main
# Full calculation with backreaction; coarsen_factor=32 is acceptable here
# because the temperature changes slowly without reionization.
soln_DM_BR = main.evolve(
    DM_process='decay', mDM=1e8, lifetime=3e25, primary='elec_delta',
    start_rs = 3000,
    coarsen_factor=32, backreaction=True
)
# With reionization, use a smaller coarsening factor: temperature changes
# rapidly and should be integrated over small step sizes.
soln_DM_reion_BR = main.evolve(
    DM_process='decay', mDM=1e8, lifetime=3e25, primary='elec_delta',
    start_rs = 3000,
    coarsen_factor=12, backreaction=True, helium_TLA=True, reion_switch=True
)
# -
# Let's compare the various solutions.
# +
# DM, no reionization, no backreaction.
Tm_DM = soln_DM[:,0]/phys.kB
xHII_DM = soln_DM[:,1]
# DM, no reionization, with backreaction.
# main.evolve returns a mapping ('rs', 'Tm', 'x', ...) rather than a plain array.
rs_vec_BR = soln_DM_BR['rs']
Tm_DM_BR = soln_DM_BR['Tm']/phys.kB
xHII_DM_BR = soln_DM_BR['x'][:,0]
# DM, reionization, no backreaction.
Tm_DM_reion = soln_DM_reion[:,0]/phys.kB
xHII_DM_reion = soln_DM_reion[:,1]
# DM, reionization, with backreaction.
rs_vec_DM_reion_BR = soln_DM_reion_BR['rs']
Tm_DM_reion_BR = soln_DM_reion_BR['Tm']/phys.kB
xHII_DM_reion_BR = soln_DM_reion_BR['x'][:,0]
fig_reion=plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
plt_Tm_DM, = plt.plot(rs_vec_DM, Tm_DM, ':', linewidth=4, label='Dark Matter')
plt_Tm_DM_BR, = plt.plot(rs_vec_BR, Tm_DM_BR, ':', linewidth=4, label='DM+Backreaction')
plt_Tm_reion, = plt.plot(rs_vec, soln_default_reion[:,0]/phys.kB, 'k--', label='Reionization')
plt_Tm_DM_reion, = plt.plot(rs_vec_DM, Tm_DM_reion, label='Reion.+DM')
plt_Tm_DM_reion_BR, = plt.plot(rs_vec_DM_reion_BR, Tm_DM_reion_BR, label='Reion.+DM+Backreaction')
# Text
plt.text(0.37, 0.88, r'$\chi \to e^+e^-$, $m_\chi$ = 100 MeV', fontsize=20, transform=ax.transAxes)
plt.text(0.63, 0.82, r'$\tau = 3 \times 10^{25}$ s', fontsize=20, transform=ax.transAxes)
leg = plt.legend(handles=[plt_Tm_DM, plt_Tm_DM_BR, plt_Tm_reion, plt_Tm_DM_reion, plt_Tm_DM_reion_BR], loc=(0.33, 0.43))
plt.title(r'\bf{Temperature History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel('Matter Temperature $T_m$ [K]')
plt.axis([4, 22, 0, 25000])
ax = plt.subplot(1,2,2)
plt_xHII_DM, = plt.plot(rs_vec_DM, xHII_DM, ':', linewidth=4, label='Dark Matter')
plt_xHII_DM_BR, = plt.plot(rs_vec_BR, xHII_DM_BR, ':', linewidth=4, label='DM+Backreaction')
plt_xHII_reion, = plt.plot(rs_vec, soln_default_reion[:,1], 'k--', label='Reionization')
plt_xHII_DM_reion, = plt.plot(rs_vec_DM, xHII_DM_reion, label=r'Reion.+DM')
plt_xHII_DM_reion_BR, = plt.plot(rs_vec_DM_reion_BR, xHII_DM_reion_BR, label='Reion.+DM+Backreaction')
# Text
plt.text(0.37, 0.88, r'$\chi \to e^+e^-$, $m_\chi$ = 100 MeV', fontsize=20, transform=ax.transAxes)
plt.text(0.63, 0.82, r'$\tau = 3 \times 10^{25}$ s', fontsize=20, transform=ax.transAxes)
leg = plt.legend(handles=[plt_xHII_DM, plt_xHII_DM_BR, plt_xHII_reion, plt_xHII_DM_reion, plt_xHII_DM_reion_BR], loc=(0.33, 0.43))
plt.title(r'\bf{Ionization History}')
plt.xlabel('Redshift (1+z)')
plt.ylabel(r'Ionization Fraction $x_\mathrm{HII}$')
plt.axis([4, 22, 0, 1.5])
# -
# ## Bibliography
# [1]<a id='cite_rates1'></a> <NAME>, <NAME>, <NAME>, and <NAME>, “Consistent modelling of the meta-galactic UV background and the thermal/ionization history of the intergalactic medium,” (2018), arXiv:1801.04931 [astro-ph.GA]
#
# [2]<a id='cite_rates2'></a> <NAME> and <NAME>, “The nature and evolution of the highly ionized near-zones in the absorption spectra of z =6 quasars,” Mon. Not. Roy. Astron. Soc. 374, 493–514 (2007), arXiv:astro-ph/0607331 [astro-ph].
#
# [3]<a id='cite_rates3'></a> <NAME> and <NAME>, “The nature and evolution of the highly ionized near-zones in the absorption spectra of z =6 quasars,” Mon. Not. Roy. Astron. Soc. 374, 493–514 (2007), arXiv:astro-ph/0607331 [astro-ph].
#
# [4]<a id='cite_tanh'></a> <NAME>, “Cosmological parameters from WMAP 5-year temperature maps,” Phys. Rev. D78, 023002 (2008), arXiv:0804.3865 [astro-ph].
| examples/.ipynb_checkpoints/Example_8_Ionization_and_Thermal_History_Copy1_20210621-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''PythonDataV2'': conda)'
# name: python3
# ---
# # f_neuralNetwork_nonerror_features
# ----
#
# Written in the Python 3.7.9 Environment with the following package versions
#
# * joblib 1.0.1
# * numpy 1.19.5
# * pandas 1.3.1
# * scikit-learn 0.24.2
# * tensorflow 2.5.0
#
# By <NAME>
#
# This Jupyter Notebook tunes a neural network model for Exoplanet classification from Kepler Exoplanet study data.
#
# Column descriptions can be found at https://exoplanetarchive.ipac.caltech.edu/docs/API_kepcandidate_columns.html
#
# **Source Data**
#
# The source data used was provided by University of Arizona's Data Analytics homework assignment. Their data was derived from https://www.kaggle.com/nasa/kepler-exoplanet-search-results?select=cumulative.csv
#
# The full data set was released by NASA at
# https://exoplanetarchive.ipac.caltech.edu/cgi-bin/TblView/nph-tblView?app=ExoTbls&config=koi
# +
# Import Dependencies
# Plotting
# %matplotlib inline
import matplotlib.pyplot as plt
# Data manipulation
import numpy as np
import pandas as pd
from statistics import mean
from operator import itemgetter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.keras.utils import to_categorical
# Parameter Selection
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# Model Development
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# Model Metrics
from sklearn.metrics import classification_report
# Save/load files
from tensorflow.keras.models import load_model
import joblib
# # Ignore deprecation warnings
# import warnings
# warnings.simplefilter('ignore', FutureWarning)
# -
# Set the seed value for the notebook, so the results are reproducible
from numpy.random import seed
seed(1)
# # Read the CSV and Perform Basic Data Cleaning
# +
# Import data
df = pd.read_csv("../b_source_data/exoplanet_data.csv")
# print(df.info())
# Drop columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop rows containing null values
df = df.dropna()
# Display data info
print(df.info())
print(df.head())
print(df.koi_disposition.unique())
# -
# Rename "FALSE POSITIVE" disposition values
# Replace the space so every label is a single token (FALSE_POSITIVE).
df.koi_disposition = df.koi_disposition.str.replace(' ','_')
print(df.koi_disposition.unique())
# # Select features
#
# Split dataframe into X and y
# Only non-error-measurement columns are used as features (see notebook title).
X = df[['koi_fpflag_nt', 'koi_fpflag_ss', 'koi_fpflag_co', 'koi_fpflag_ec', 'koi_period', 'koi_time0bk', 'koi_impact', 'koi_duration','koi_depth', 'koi_prad', 'koi_teq', 'koi_insol', 'koi_model_snr', 'koi_tce_plnt_num', 'koi_steff', 'koi_slogg', 'koi_srad', 'ra', 'dec', 'koi_kepmag']]
y = df["koi_disposition"]
print(X.shape, y.shape)
# # Create a Train Test Split
#
# Use `koi_disposition` for the y values
# Split X and y into training and testing groups
# 70/30 split with a fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)
# Display training data
X_train.head()
# # Pre-processing
# Scale the data with MinMaxScaler
# Fit the scaler on the training data only, then apply to both splits
# (avoids leaking test-set statistics into training).
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# +
# One-Hot-Encode the y data
# Step 1: Label-encode data set
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
encoded_y_train = label_encoder.transform(y_train)
encoded_y_test = label_encoder.transform(y_test)
# Step 2: Convert encoded labels to one-hot-encoding
y_train_categorical = to_categorical(encoded_y_train)
y_test_categorical = to_categorical(encoded_y_test)
# -
print('Unique KOI Disposition Values')
print(y.unique())
print('-----------')
print('Sample KOI Disposition Values and Encoding')
print(y_test[:5])
print(y_test_categorical[:5])
# # Hyperparameter Tuning
#
# Use `GridSearchCV` to tune the model's parameters
# +
# Code was modified from sample code presented on
# https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
# Function to create model, required for KerasClassifier
def create_model(neurons=20):
    """Build and compile a one-hidden-layer softmax classifier.

    Used as the `build_fn` for KerasClassifier so GridSearchCV can tune
    `neurons` (and batch size / epochs). Input width comes from the scaled
    training features; output width from the one-hot label matrix.
    """
    layers = [
        Dense(neurons, input_dim=X_train_scaled.shape[1], activation='relu'),
        Dropout(0.2),
        Dense(units=y_train_categorical.shape[1], activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
# +
# Code was modified from sample code presented on
# https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
# Use scikit-learn to grid search the batch size and epochs
# create model
# Wrap the Keras builder so scikit-learn's GridSearchCV can drive it.
grid_model = KerasClassifier(build_fn=create_model, verbose=0)
# define the grid search parameters
batch_size = [10, 20]
epochs = [100, 1000]
neurons = [5, 10, 15, 20]
param_grid = dict(batch_size=batch_size, epochs=epochs, neurons=neurons)
# Apply GridSearchCV
# 3-fold CV over 16 parameter combinations; n_jobs=-1 uses all cores.
grid = GridSearchCV(estimator=grid_model, param_grid=param_grid, n_jobs=-1, cv=3)
grid_result = grid.fit(X_train_scaled, y_train_categorical)
# summarize results
print("--------------------------")
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
# -
# # Create and Train the Model - Neural Network
# +
# Create model
# Hyperparameters below (20 neurons, 1000 epochs, batch size 20) are the
# best combination found by the grid search above.
nn_model = Sequential()
# Define first layer
nn_model.add(Dense(units=20,
                   activation='relu', input_dim=X_train_scaled.shape[1]))
# Define output layer
nn_model.add(Dense(units=y_train_categorical.shape[1], activation='softmax'))
# Review Model
print(nn_model.summary())
# Compile Model
nn_model.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
# Train model
nn_model.fit(
    X_train_scaled,
    y_train_categorical,
    epochs=1000,
    batch_size=20,
    shuffle=True,
    verbose=0
)
# Evaluate the model using the testing data
model_loss, model_accuracy = nn_model.evaluate(
    X_test_scaled, y_test_categorical, verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# -
# # Option 2: Model Results when using all features not associated with error measurements
# * Grid Definition:
# * batch_size = [10, 20]
# * epochs = [100, 1000]
# * neurons = [5, 10, 15, 20]
# * Grid Best Result: Best: 0.894543 using {'batch_size': 20, 'epochs': 1000, 'neurons': 20}
# * Tuned Model Results: Loss: 0.25760945677757263, Accuracy: 0.894184947013855
# # Save the Model
# Save the model results
# Persist the trained network in HDF5 format for later reloading.
nn_model.save("./f_neuralNetwork_nonerror_Features_model.h5")
# # Model Discussion
#
# The model score using the neural network method is one of the best for predicting exoplanet observations. These results limiting the input features were comparable to the model utilizing all of the feature inputs. The hyperparameter tuning is very slow.
| f_NN/f_neuralNetwork_nonerror_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="PtbKyLoi2YZc"
# # Convex Optimization for Machine Learning & Computer Vision
# ## Week 6 Programming Exercise - Multinomial Logistic Regression
# | Name | Matriculation Number | E-mail |
# | :------- | :----------------------- | :--------- |
# | <NAME> | 03697290 | [<EMAIL>](mailto:<EMAIL>) |
# + colab={} colab_type="code" id="_MyPGVKU2WOh"
import numpy as np
import matplotlib.pyplot as plt
from urllib import request
import os
import gzip
import shutil
# + colab={} colab_type="code" id="PLeuz7jgq2sR"
# Toy data related
# One (x, y) cluster center and one matplotlib color per class (5 classes).
_TOY_DATA_CLUSTER_CENTERS = ((2, -4), (-3, 3), (3, 1), (-5, -2), (-4, -5))
_TOY_DATA_COLORS = ('g', 'r', 'b', 'm', 'y')
def get_toy_data(classes=5,
                 samples_per_class=500,
                 feature_dim=2,
                 noise_dim=100,
                 feature_sigma=1.0,
                 noise_sigma=3.0,
                 cluster_centers=_TOY_DATA_CLUSTER_CENTERS,
                 test_ratio=0.1,
                 visualize=True):
    """Generate a Gaussian-blob toy classification set with extra noise dims.

    Each row is [feature_dim informative dims | noise_dim pure-noise dims |
    class label].  Rows are shuffled, then split into train/test according
    to `test_ratio`.  Uses the global numpy RNG, so the exact order of the
    randn/permutation calls below is part of the reproducibility contract
    under a fixed seed -- do not reorder them.

    Returns (train_data, test_data, classes).
    """
    num_samples = samples_per_class * classes
    total_dim = feature_dim + noise_dim + 1  # +1 for the label column
    data = np.zeros((num_samples, total_dim))
    for j in range(classes):
        # generate useful features
        data[j*samples_per_class:(j+1)*samples_per_class, 0:feature_dim] = \
            np.array(cluster_centers[j]).reshape(1, feature_dim) + \
            np.random.randn(samples_per_class, feature_dim) * feature_sigma
        # generate random features
        data[j*samples_per_class:(j+1)*samples_per_class,
             feature_dim:total_dim-1] = \
            np.random.randn(samples_per_class, noise_dim) * noise_sigma
        # generate ground-truth labels
        data[j*samples_per_class:(j+1)*samples_per_class, total_dim-1] = \
            np.ones(samples_per_class) * j
    # shuffle data
    data = np.random.permutation(data)
    # Split data into training and test set
    sp_idx = num_samples - int(test_ratio * num_samples)
    train_data = data[:sp_idx, :]
    test_data = data[sp_idx:, :]
    # visualize if asked to
    if visualize:
        # Scatter the first two (informative) dims, colored by label.
        plt.figure('train_data')
        train_colors = [_TOY_DATA_COLORS[int(c)] for c in train_data[:, -1]]
        plt.scatter(train_data[:, 0], train_data[:, 1], c=train_colors)
        plt.figure('test_data')
        test_colors = [_TOY_DATA_COLORS[int(c)] for c in test_data[:, -1]]
        plt.scatter(test_data[:, 0], test_data[:, 1], c=test_colors)
        plt.show()
    return train_data, test_data, classes
# + colab={} colab_type="code" id="4F6AR0_iq-7J"
# MNIST data related
MNIST_CLASSES = 10
_MNIST_URL_BASE = 'http://yann.lecun.com/exdb/mnist/'
_MNIST_FILES = {'train_image': 'train-images-idx3-ubyte',
'train_label': 'train-labels-idx1-ubyte',
'test_image': 't10k-images-idx3-ubyte',
'test_label': 't10k-labels-idx1-ubyte'}
def download_unzip_gz(from_url, save_as):
print('Downloading from {}'.format(from_url))
with request.urlopen(from_url) as src, open(save_as, 'wb') as dst:
shutil.copyfileobj(gzip.GzipFile(fileobj=src), dst)
print('Saved as {}'.format(save_as))
def load_MNIST_images(from_file):
    """Load an idx3-ubyte MNIST image file as a (num_images, 784) uint8 array."""
    with open(from_file, 'rb') as fh:
        raw = fh.read()
    # the first 16 bytes are the idx3 header (magic, count, rows, cols)
    pixels = np.frombuffer(raw, dtype=np.uint8, offset=16)
    return pixels.reshape(-1, 28 * 28)
def load_MNIST_labels(from_file):
    """Load an idx1-ubyte MNIST label file as a (num_labels, 1) uint8 array."""
    with open(from_file, 'rb') as fh:
        raw = fh.read()
    # the first 8 bytes are the idx1 header (magic, count)
    return np.frombuffer(raw, dtype=np.uint8, offset=8).reshape(-1, 1)
def visualize_MNIST_data(data, rows=6, cols=10):
    """Tile the first rows*cols images of `data` into a single figure.

    Rows of `data` are flattened 28x28 images with the label appended as the
    last column; the label is stripped via `[:-1]` before reshaping.
    """
    out = np.zeros((28 * rows, 28 * cols))
    for r in range(rows):
        for c in range(cols):
            # column-major placement: tile (r, c) shows sample index r+rows*c
            out[r*28:(r+1)*28, c*28:(c+1)*28] = \
                data[r+rows*c, :-1].reshape(28, 28)
    plt.figure('MNIST training image examples')
    # pixels are 0..255 uint8 values; scale into [0, 1] for display
    plt.imshow(out / 255., cmap='gray')
    plt.show()
def get_MNIST_data(save_dir='.', visualize=True):
    """Download MNIST if missing, load it, and return (train, test, classes).

    Each returned array has one flattened 28x28 image per row with the label
    appended as the last (785th) column, as float64.
    """
    # download MNIST data if not present already
    for filename in _MNIST_FILES.values():
        filepath = os.path.join(save_dir, filename)
        if os.path.exists(filepath):
            print('Found {}'.format(filepath))
        else:
            download_unzip_gz(_MNIST_URL_BASE+filename+'.gz', filepath)
    # load MNIST data
    train_image = load_MNIST_images(
        os.path.join(save_dir, _MNIST_FILES['train_image']))
    train_label = load_MNIST_labels(
        os.path.join(save_dir, _MNIST_FILES['train_label']))
    train_data = np.concatenate((train_image, train_label),
                                axis=1).astype(np.float64)
    test_image = load_MNIST_images(
        os.path.join(save_dir, _MNIST_FILES['test_image']))
    test_label = load_MNIST_labels(
        os.path.join(save_dir, _MNIST_FILES['test_label']))
    test_data = np.concatenate((test_image, test_label),
                               axis=1).astype(np.float64)
    # BUG FIX: the `visualize` flag was previously ignored and examples were
    # plotted unconditionally; honor the parameter.
    if visualize:
        visualize_MNIST_data(train_data)
    return train_data, test_data, MNIST_CLASSES
# + colab={} colab_type="code" id="tciloI_5rCHh"
# optimization related
def softmax(z, dim):
    """Numerically stable softmax along axis `dim` (max-shifted)."""
    ez = np.exp(z - np.max(z, dim, keepdims=True))
    return ez / np.sum(ez, dim, keepdims=True)
def log_softmax(z, dim):
    """Numerically stable log-softmax along axis `dim`."""
    dz = z - np.max(z, dim, keepdims=True)
    return dz - np.log(np.sum(np.exp(dz), dim, keepdims=True))
def classifier(X, W, b):
    """Linear class scores: X @ W + b."""
    return X @ W + b
def score(X, W, b):
    """Class probabilities: softmax over the linear scores, per row."""
    return softmax(classifier(X, W, b), 1)
def average_loss(X, y, W, b):
    """Mean negative log-likelihood of the ground-truth classes `y`."""
    # FIX: np.long was removed in NumPy >= 1.24; builtin int is equivalent here.
    y = y.astype(int)
    return np.mean(-log_softmax(classifier(X, W, b), 1)[np.arange(len(y)), y])
# the differentiable objective (squared-l2 penalty on both W and b)
def objective_diff(X, y, W, b, lambda1, lambda2):
    return average_loss(X, y, W, b) + 0.5 * lambda2 * np.sum(np.square(b)) + \
        0.5 * lambda1 * np.sum(np.square(W))
# the non-differentiable objective (row-wise l_{1,2} group norm on W)
def objective_nondiff(X, y, W, b, lambda1, lambda2):
    return average_loss(X, y, W, b) + 0.5 * lambda2 * np.sum(np.square(b)) + \
        0.5 * lambda1 * np.sum(np.sqrt(np.sum(np.square(W), 1)))
# + [markdown] colab_type="text" id="1D1VkKWJdWL9"
# It is [straightforward to show](https://math.stackexchange.com/questions/1428344/what-is-the-derivation-of-the-derivative-of-softmax-regression-or-multinomial-l) that the gradient of the log-softmax loss can be re-written in vector form as the following.
#
# $
# \begin{align}
# \nabla_{\mathbf{W}}\,\mathcal{l}(\mathbf{W}, \mathbf{b}, \mathbf{x}, \mathbf{y}) & = & -\frac{1}{N}\mathbf{x}^T\Big( \mathbf{y} - \sigma(\mathbf{W}^T\mathbf{x} + \mathbf{b})\Big)\\
# \nabla_{\mathbf{b}}\,\mathcal{l}(\mathbf{W}, \mathbf{b}, \mathbf{x}, \mathbf{y}) & = & -\frac{1}{N} \Big( \mathbf{y} - \sigma(\mathbf{W}^T\mathbf{x} + \mathbf{b})\Big).
# \end{align}
# $
#
# Where $\mathbf{y} \in \{0, 1\}^K$ is a one-hot vector with $y_k = 1$ for ground-truth class $k$ and $\sigma(.)$ is the soft-max function.
#
# + colab={} colab_type="code" id="SZDn7joF4Jxg"
# gradient related
# one hot representation: row i of the result is the unit vector for class t[i]
def one_hot(t, c):
    # FIX: np.int was removed in NumPy >= 1.24; builtin int is equivalent.
    return np.eye(c)[t.astype(int)]
# TODO: gradients of the average logsoftmax loss w.r.t. W
def grad_W_avgloss(X, score, y):
    """Gradient of the mean log-softmax loss w.r.t. W: X^T (p - onehot) / N."""
    residual = score - one_hot(y, score.shape[1])
    return (X.T @ residual) / X.shape[0]
# TODO: gradients of the average logsoftmax loss w.r.t. b
# You can use e.g. scipy.optimize.minimize to compare your result for the
# differentiable objective objective_diff.
def grad_b_avgloss(score, y):
    """Gradient of the mean log-softmax loss w.r.t. b: mean of (p - onehot)."""
    residual = score - one_hot(y, score.shape[1])
    return residual.mean(axis=0)
# TODO: gradients of the square regularization term for W in objective_diff
def grad_W_sqreg(W, lambda1):
    """Gradient of 0.5 * lambda1 * ||W||_F^2, namely lambda1 * W."""
    return W * lambda1
# TODO: gradients of the square regularization term for b in both objectives
def grad_b_sqreg(b, lambda2):
    """Gradient of 0.5 * lambda2 * ||b||^2, namely lambda2 * b."""
    return b * lambda2
# + [markdown] colab_type="text" id="PankhOBYX9lV"
# For $J(\mathbf{X})=\frac{\lambda_1}{2}\left\Vert{\mathbf{X}}\right\Vert_{1, 2}$, the definition of the proximal operator gives
#
#
# $
# \begin{align*}
# \because\; & \text{prox}_{\tau J}\big(\mathbf{X}\big) & :=\; & \arg \min_\mathbf{Y} J(\mathbf{Y}) + \frac{1}{2\tau}\left\Vert \mathbf{Y} - \mathbf{X} \right\Vert^2_2 & & \\
# \Rightarrow & & =\; & \arg \min_\mathbf{Y} \frac{\lambda_1}{2}\left\Vert{\mathbf{Y}}\right\Vert_{1, 2} + \frac{1}{2\tau}\left\Vert \mathbf{Y} - \mathbf{X} \right\Vert^2_2 & & \\
# \Rightarrow & & =\; & \arg \min_\mathbf{Y} \frac{\lambda_1}{2}\Big(\left\Vert{\mathbf{Y}}\right\Vert_{1, 2} + \frac{1}{2}\frac{2}{\tau\lambda_1}\left\Vert \mathbf{Y} - \mathbf{X} \right\Vert^2_2\Big) & & \\
# \Rightarrow & & =\; & \arg \min_\mathbf{Y}\left\Vert{\mathbf{Y}}\right\Vert_{1, 2} + \frac{1}{2\tau'}\left\Vert \mathbf{Y} - \mathbf{X} \right\Vert^2_2\;&,\;\;\;\; & \tau'=\frac{\tau\lambda_1}{2}\\
# \therefore\; & \text{prox}_{\tau J}\big(\mathbf{X}\big) & =\; & \text{prox}_{\tau' \left\Vert . \right\Vert_{1, 2}}\big(\mathbf{X}\big)\;&,\;\;\;\; & \tau'=\frac{\tau\lambda_1}{2}
# \end{align*}
# $
#
# As we already know from [theoretical exercise 6.4](https://github.com/uzairakbar/convex-optimization/blob/master/exercises/week6/solutionSubmission6.pdf), the proximal operator for the $l_{1, 2}$-norm is given as:
#
# $
# {prox}_{\tau' \left\Vert . \right\Vert_{1, 2}}\big(\mathbf{X}\big) = \Big\{ \mathbf{Y} \in \mathbf{R}^{m\times n} \;\Big|\; \mathbf{Y}_i=\begin{cases}
# \mathbf{0}\; & , & \;\left\Vert \mathbf{X}_i \right\Vert_2 \leq \tau'\\
# \mathbf{X}_i - \tau'\frac{\mathbf{X}_i}{\left\Vert \mathbf{X}_i \right\Vert_2}\; & , & \;\left\Vert \mathbf{X}_i \right\Vert_2 \gt \tau'
# \end{cases}\Big\}
# $
# + colab={"base_uri": "https://localhost:8080/", "height": 911} colab_type="code" id="bROWhoG6Hztd" outputId="ab95bb1d-98c4-4b21-dc74-<KEY>"
# Train the group-sparse logistic regression with proximal gradient descent.
np.random.seed(43) # manual random seed for reproducible results
use_mnist = True # REMARK: set to true once your code works on toy data
# get data
if use_mnist:
    train_data, test_data, classes = get_MNIST_data(save_dir='.',
                                                    visualize=True)
else:
    train_data, test_data, classes = get_toy_data(visualize=True)
X, X_test = train_data[:, :-1], test_data[:, :-1]
feature_dim = X.shape[1]
y, y_test = train_data[:, -1], test_data[:, -1]
# Normalize input data
X, X_test = X / np.max(X), X_test / np.max(X_test)
# initialize parameters to be trained
W = np.random.rand(feature_dim, classes)
b = np.zeros((1, classes))
# TODO(optional): Do derivative check (for debugging only).
# You can use scipy.optimize.grad_check on objective_diff to verify the 4
# gradient functions you have implemented.
pass
# define optimization parameters
lambda1, lambda2 = 0.01, 0.02
max_iter = 6000
if use_mnist:
    tau = 0.7
else:
    tau = 1 / (np.mean(np.sum(np.square(X), axis=1)) + lambda1)
threshold = 1e-12
# define lists that should be filled up in the following updates
obj_val = []
train_error = []
test_error = []
# perform proximal gradient iterations
for i in range(max_iter):
    # TODO: implement proximal gradient here...
    ########################################################################
    # calculate the train and test score
    score_train, score_test = score(X, W, b), score(X_test, W, b)
    # calculate the train and test prediction
    prediction_train, prediction_test = (np.argmax(score_train, axis=1),
                                         np.argmax(score_test, axis=1))
    # calculate objective value, test error and training error
    obj_val.append(objective_nondiff(X, y, W, b, lambda1, lambda2))
    # FIX: np.float was removed in NumPy >= 1.24; builtin float is equivalent.
    train_error.append(np.mean((prediction_train != y
                                ).astype(float))*100.)
    test_error.append(np.mean((prediction_test != y_test
                               ).astype(float))*100.)
    # do gradient descent iteration on the differentiable objective
    W = W - tau * grad_W_avgloss(X, score_train, y)
    b = b - tau * (grad_b_avgloss(score_train, y) + grad_b_sqreg(b, lambda2))
    # apply proximal operator of 1,2-norm to W (row-wise soft shrinkage)
    row_l2_norm = np.sqrt(np.sum(np.square(W), axis=1))
    tau_ = tau*lambda1/2
    W[row_l2_norm <= tau_, :] = 0
    W[row_l2_norm > tau_, :] = W[row_l2_norm > tau_, :] - \
        tau_ * W[row_l2_norm > tau_, :] / row_l2_norm[row_l2_norm > tau_, np.newaxis]
    # display current results every 500 iterations
    if i%500 == 0:
        print('iteration={}\t |\t obj={:.2f}\t |\t train={:.2f}\t |\t test={:.2f}'.format(
            i, obj_val[-1], train_error[-1], test_error[-1]))
    ########################################################################
    # evalualte stopping criterion: stop when the objective barely decreases
    if i > 0 and obj_val[i-1] - obj_val[i] < threshold:
        break
# Plot objective value, train / test loss
fig = plt.figure('Results')
fig.set_size_inches(15, 5)
ax = fig.add_subplot(1, 3, 1)
ax.plot(range(len(obj_val)), obj_val, 'r')
ax.set_title("Objective Value")
ax.grid()
ax = fig.add_subplot(1, 3, 2)
ax.plot(range(len(train_error)), train_error, 'g')
ax.set_title("Train Error")
ax.grid()
ax = fig.add_subplot(1, 3, 3)
ax.plot(range(len(test_error)), test_error, 'b')
ax.set_title("Test Error")
ax.grid()
plt.show()
# plot classifier for toy data case
if not use_mnist:
    s = 0.1
    x1Grid, x2Grid = np.meshgrid(np.arange(-4, 4, s), np.arange(-4, 4, s))
    xGrid = np.stack((x1Grid.flatten(), x2Grid.flatten()), axis=1)
    scores_train = classifier(xGrid, W[0:2, :], b)
    plt.figure('classifier')
    test_colors = [_TOY_DATA_COLORS[int(c)] for c in test_data[:, -1]]
    plt.scatter(test_data[:, 0], test_data[:, 1], c=test_colors)
    for c in range(classes):
        color = _TOY_DATA_COLORS[c]
        plt.contour(x1Grid, x2Grid,
                    scores_train[:, c].reshape(x1Grid.shape),
                    levels=1, colors=color)
    plt.show()
# + [markdown] colab_type="text" id="y1HYaeChpGEg"
# The useless features are assigned low weights in $\mathbf{W}^\ast$, which are the background pixels that occupy most of the area in the image (therefore, the closer a pixel is to the edge of the image, the higher the likelihood it belongs to the background and subsequently the edge pixels/features will be assigned low weights).
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="4J7WV4hVn9vp" outputId="8ec2f861-3d5a-4049-d2eb-2874fffd1ce2"
# visualization of feature importance
plt.imshow(np.abs(W.T), cmap='hot', interpolation='nearest', aspect='auto')
plt.xlabel("feature index")
plt.ylabel("class")
plt.title("Feature Importance - Absolute Feature Weights")
plt.show()
# + [markdown] colab_type="text" id="ayUH6PCwqfap"
# Therefore, $l_{1, 2}$ norm performs better because it promotes sparsity, and useless features are ignored in classification. In contrast, $l_2$ norm promotes dense weights, which encourages the model to use irrelevant features despite no predictive power in terms of labels.
# + [markdown] colab_type="text" id="P0fSbTVSHCsI"
# ***
| exercises/week6/programmingSubmission6/logisticRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# # !pip install matplotlib
#import the required packages
import os
import time
from tensorflow import keras
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from IPython import display
from keras.preprocessing.image import ImageDataGenerator
# + endofcell="--"
# Enable TensorFlow GPU memory growth so the process does not grab all VRAM.
gpus = tf.config.list_physical_devices('GPU')
gpus
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # memory growth must be set before GPUs are initialized
        print(e)
# # +
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# -
# --
# Square training images and dataset batch size used below.
img_height, img_width = 64, 64
batch_size = 51
# + endofcell="--"
# Load the unlabeled image folder as a tf.data pipeline of raw uint8 batches.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    './MiriSegal',
    image_size=(img_height, img_width),
    batch_size=batch_size,
    label_mode=None)
# Preview the first 9 images of the first batch.
plt.figure(figsize=(10, 10))
for images in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.axis("off")
for image_batch in train_ds:
    print(image_batch.shape)
    break
tf.data.experimental.AUTOTUNE
AUTOTUNE = tf.data.experimental.AUTOTUNE
# # +
# train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
# -
# Rescale uint8 pixels [0, 255] into [-1, 1] to match the generator's tanh output.
normalization_layer = layers.experimental.preprocessing.Rescaling(scale= 1./127.5, offset=-1)
normalized_ds = train_ds.map(lambda x: normalization_layer(x))
image_batch = next(iter(normalized_ds))
first_image = image_batch[0]
print(np.min(first_image), np.max(first_image))
# --
# DCGAN latent vector size; the generator input is a 1x1 "image" with
# latent_dim channels.
latent_dim = 100
noise_dim = (1,1,latent_dim)
def generator_model():
    """Build the DCGAN generator: 1x1xlatent_dim noise -> 64x64x3 tanh image.

    Five Conv2DTranspose stages upsample 1 -> 4 -> 8 -> 16 -> 32 -> 64 pixels.
    NOTE(review): BatchNormalization's `center` and `scale` are boolean flags;
    passing 1.0 / 0.02 only acts as truthy values — confirm intent.
    """
    inputs = keras.Input(shape=noise_dim, name='input_layer')
    x = layers.Conv2DTranspose(64 * 8, kernel_size=4, strides= 4, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_transpose_1')(inputs)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_1')(x)
    x = layers.ReLU(name='relu_1')(x)
    x = layers.Conv2DTranspose(64 * 4, kernel_size=4, strides= 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_transpose_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_2')(x)
    x = layers.ReLU(name='relu_2')(x)
    x = layers.Conv2DTranspose(64 * 2, 4, 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_transpose_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_3')(x)
    x = layers.ReLU(name='relu_3')(x)
    x = layers.Conv2DTranspose(64 * 1, 4, 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_transpose_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_4')(x)
    x = layers.ReLU(name='relu_4')(x)
    # final stage maps to 3 channels with tanh -> pixel values in [-1, 1]
    outputs = layers.Conv2DTranspose(3, 4, 2,padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, activation='tanh', name='conv_transpose_5')(x)
    model = tf.keras.Model(inputs, outputs, name="Generator")
    return model
generator = generator_model()
generator.save('dcgan_gen.h5')
generator.summary()
def discriminator_model():
    """Build the DCGAN discriminator: 64x64x3 image -> sigmoid realness score.

    Five strided Conv2D stages downsample 64 -> 32 -> 16 -> 8 -> 4 -> 1.
    NOTE(review): BatchNormalization's `center`/`scale` are boolean flags;
    1.0 / 0.02 only act as truthy values — confirm intent.
    """
    inputs = keras.Input(shape=(64, 64, 3), name='input_layer')
    x = layers.Conv2D(64, kernel_size=4, strides= 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_1')(inputs)
    x = layers.LeakyReLU(0.2, name='leaky_relu_1')(x)
    x = layers.Conv2D(64 * 2, kernel_size=4, strides= 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_2')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_1')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_2')(x)
    x = layers.Conv2D(64 * 4, 4, 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_3')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_2')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_3')(x)
    x = layers.Conv2D(64 * 8, 4, 2, padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, name='conv_4')(x)
    x = layers.BatchNormalization(momentum=0.1, epsilon=0.8, center=1.0, scale=0.02, name='bn_3')(x)
    x = layers.LeakyReLU(0.2, name='leaky_relu_4')(x)
    # final 4x4 stride-4 conv collapses the 4x4 map to a single logit per image
    outputs = layers.Conv2D(1, 4, 4,padding='same', kernel_initializer=tf.keras.initializers.RandomNormal(
        mean=0.0, stddev=0.02), use_bias=False, activation='sigmoid', name='conv_5')(x)
    outputs = layers.Flatten()(outputs)
    model = tf.keras.Model(inputs, outputs, name="Discriminator")
    return model
discriminator = discriminator_model()
discriminator.save('dcgan_disc.h5')
discriminator.summary()
# +
# Standard (non-saturating) GAN losses built from binary cross-entropy.
binary_cross_entropy = tf.keras.losses.BinaryCrossentropy()
def generator_loss(label, fake_output):
    """BCE between the discriminator's output on fakes and the target labels."""
    gen_loss = binary_cross_entropy(label, fake_output)
    #print(gen_loss)
    return gen_loss
def discriminator_loss(label, output):
    """BCE between the discriminator's output and the target labels."""
    disc_loss = binary_cross_entropy(label, output)
    #print(total_loss)
    return disc_loss
learning_rate = 0.0002
# FIX: the `lr` argument is deprecated/removed in modern Keras; also actually
# use the `learning_rate` constant defined above (it was previously unused).
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.999)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.999)
# -
num_examples_to_generate = 25
# We will reuse this seed overtime to visualize progress
seed = tf.random.normal([num_examples_to_generate, 1, 1, latent_dim])
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
# @tf.function
def train_step(images, epoch):
    """One GAN update: conditionally train the discriminator, always train G.

    Discriminator training is skipped when it already separates real/fake
    confidently (mean real score > 0.8 and mean fake score < 0.4).
    Returns skip_next (True when D training was skipped).
    NOTE: left un-decorated with @tf.function because .numpy() is used below.
    """
    # noise vector sampled from normal distribution
    # FIX: BATCH_SIZE was never defined in this notebook; use the module-level
    # batch_size defined with the dataset.
    noise = tf.random.normal([batch_size, 1, 1, latent_dim])
    real_output = discriminator(images, training=True)
    perf_real = real_output.numpy().mean()
    generated_images = generator(noise, training=True)
    fake_output = discriminator(generated_images, training=True)
    perf_fake = fake_output.numpy().mean()
    if perf_real > 0.8 and perf_fake < 0.4:
        print('SKIPPING discriminator training, real data: %f, fake data: %f' % (perf_real, perf_fake))
        skip_next = True
    else:
        print('Trying discriminator training, real data: %f, fake data: %f' % (perf_real, perf_fake))
        skip_next = False
    # we train discriminator only every 10th step to prevent it from being too strong
    if not skip_next:
        # Train Discriminator with real labels
        with tf.GradientTape() as disc_tape1:
            real_output = discriminator(images, training=True)
            perf_real = real_output.numpy().mean()
            print('real data: ',perf_real)
            real_targets = tf.ones_like(real_output)
            disc_loss1 = discriminator_loss(real_targets, real_output)
        # gradient calculation for discriminator for real labels
        gradients_of_disc1 = disc_tape1.gradient(disc_loss1, discriminator.trainable_variables)
        # parameters optimization for discriminator for real labels
        discriminator_optimizer.apply_gradients(zip(gradients_of_disc1,\
                                                    discriminator.trainable_variables))
        # Train Discriminator with fake labels
        with tf.GradientTape() as disc_tape2:
            fake_output = discriminator(generated_images, training=True)
            fake_targets = tf.zeros_like(fake_output)
            disc_loss2 = discriminator_loss(fake_targets, fake_output)
            perf_fake = fake_output.numpy().mean()
            print('fake data: ',perf_fake)
        # gradient calculation for discriminator for fake labels
        gradients_of_disc2 = disc_tape2.gradient(disc_loss2, discriminator.trainable_variables)
        # parameters optimization for discriminator for fake labels
        discriminator_optimizer.apply_gradients(zip(gradients_of_disc2,\
                                                    discriminator.trainable_variables))
    # Train Generator with real labels (non-saturating loss: target = ones)
    with tf.GradientTape() as gen_tape:
        generated_images = generator(noise, training=True)
        fake_output = discriminator(generated_images, training=True)
        real_targets = tf.ones_like(fake_output)
        gen_loss = generator_loss(real_targets, fake_output)
    # gradient calculation for generator for real labels
    gradients_of_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    # parameters optimization for generator for real labels
    generator_optimizer.apply_gradients(zip(gradients_of_gen,\
                                            generator.trainable_variables))
    # FIX: callers assign `skip_next = train_step(...)`; return the flag
    # instead of implicitly returning None.
    return skip_next
def train(dataset, epochs):
    """Run `epochs` passes over `dataset`, previewing and checkpointing.

    NOTE(review): D_loss_list/G_loss_list are never filled, and skip_next is
    unused after assignment — confirm whether loss tracking was intended.
    """
    for epoch in range(epochs):
        start = time.time()
        i = 0
        D_loss_list, G_loss_list = [], []
        for image_batch in dataset:
            i += 1
            skip_next = train_step(image_batch, epoch)
        print(epoch)
        # preview generated samples from the fixed seed every 50 epochs
        if epoch % 50 ==0:
            display.clear_output(wait=True)
            generate_and_save_images(generator,
                                     epoch + 1,
                                     seed)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
        # Save the model weights every 500 epochs
        if (epoch + 1) % 500 == 0:
            generator.save_weights('training_weights/gen_'+ str(epoch)+'.h5')
            discriminator.save_weights('training_weights/disc_'+ str(epoch)+'.h5')
    # Generate after the final epoch
    # display.clear_output(wait=True)
    generate_and_save_images(generator,
                             epochs,
                             seed)
def generate_and_save_images(model, epoch, test_input):
    """Render the generator's output for `test_input` as a 5x5 grid and save it.

    Expects `test_input` to hold num_examples_to_generate (25) latent vectors.
    """
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)
    print(predictions.shape)
    fig = plt.figure(figsize=(4,4))
    for i in range(predictions.shape[0]):
        plt.subplot(5, 5, i+1)
        # map tanh output [-1, 1] back to pixel values [0, 255]
        pred = (predictions[i, :, :, :] + 1 ) * 127.5
        pred = np.array(pred)
        plt.imshow(pred.astype(np.uint8))
        plt.axis('off')
    plt.savefig('images/image_at_epoch_{:d}.png'.format(epoch))
    plt.show()
train(normalized_ds, 10000)
# Keras augmentation generator for a second training run with jittered images.
datagen = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.1,
    width_shift_range=0.05,
    height_shift_range=0.05,
    horizontal_flip=False,
    vertical_flip=False,
    fill_mode='constant', cval=1.0)
# Exhaust the dataset so `a` ends up holding the LAST batch only;
# the augmentation flow below therefore draws from that single batch.
for a in normalized_ds.take(-1):
    pass
dg = datagen.flow(a,batch_size=128)
def train2(dataset, epochs):
    """Like train(), but pulls 3 augmented batches per epoch via dataset.next().

    `dataset` is a Keras ImageDataGenerator flow (NumpyArrayIterator).
    """
    for epoch in range(epochs):
        start = time.time()
        i = 0
        D_loss_list, G_loss_list = [], []
        # three augmented mini-batches per "epoch"
        for ii in range(3):
            image_batch = dataset.next()
            i += 1
            skip_next = train_step(image_batch, epoch)
        print(epoch)
        if epoch % 50 ==0:
            display.clear_output(wait=True)
            generate_and_save_images(generator,
                                     epoch + 1,
                                     seed)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
        # Save the model every 50 epochs
        if (epoch + 1) % 50 == 0:
            generator.save_weights('training_weights/gen_'+ str(epoch)+'.h5')
            discriminator.save_weights('training_weights/disc_'+ str(epoch)+'.h5')
    # Generate after the final epoch
    # display.clear_output(wait=True)
    generate_and_save_images(generator,
                             epochs,
                             seed)
train2(dg, 10000)
| animal-gan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import time
# Download the "large" public profile picture for a range of numeric Facebook
# ids via the Graph API picture redirect endpoint.
for uid in range(100, 120):  # FIX: loop var renamed; `id` shadowed the builtin
    r = requests.get("http://graph.facebook.com/{}/picture?type=large".format(uid),
                     timeout=10)
    # FIX: skip failed requests so HTML/JSON error payloads are not saved
    # to disk with a .jpg extension.
    if not r.ok:
        print("skipping id {}: HTTP {}".format(uid, r.status_code))
        continue
    with open("/Users/jappanjeetsingh/Desktop/profile-pics/profile{}.jpg".format(uid), "wb") as file:
        file.write(r.content)
| Web Scraping (Beautiful Soup, Scrapy, Selenium)/webScraping_Day17/Scrape-ImgsFBGraph(Api)/solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Multislice Simulations
#
# **<NAME>**<br>
# Northwestern University
#
# - The multislice method of Cowley and Moodie[1](https://doi.org/10.1107/S0365110X57002194) is, surely, the most widely used method for the simulation of images.
#
# - Multislice
#
# - This script is based on [Kirkland's Advanced Computing in Electron Microscopy](https://www.springer.com/us/book/9781489995094#otherversion=9781441965325)
# +
# Numeric operations
import numpy as np
# Visualization / Plotting
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from numpy.matlib import repmat
#to load .mat files
from scipy.io import loadmat
from scipy.special import jv
from scipy.io import loadmat
# for math functions
import scipy.special as sp
from scipy.interpolate import interp1d
from scipy.special import expit
import math
from mpmath import *
from scipy.special import kn
import os
# Ensure that images are rendered in this notebook:
# %matplotlib inline
# -
# ### Define structure here
#
# - Essentially at he end of this block you need an array with x,y,z positions and Z number for each element tiled by the number of unit cells needed for simulations in [x,y] directions
#
# - This can be defined using softwares like Vesta or CrystalMaker
#
# - You can find similar code in the Week 5 material
# +
# Build an 8x8 (x,y) tiling of the SrTiO3 unit cell as an (N, 4) array of
# [x, y, z, Z] rows in Angstroms.
numUC = np.array([1,1])*8
thickness = 100
## Define cubic SrTiO3
#Lattice parameter
a = 3.905
# Cell dimensions
cellDim = np.array([1,1,4])*a
#Debye waller factors converted into RMS atomic displacements
uSr = np.sqrt(0.270/8)/np.pi
uTi = np.sqrt(0.415/8)/np.pi
uO = 0.10
#Define STO lattice: fractional x, y, z and atomic number Z per site
b = np.array([[0.0, 0.0, 0.0, 38],
              [0.5, 0.5, 0.5, 22],
              [0.0, 0.0, 0.5, 8],
              [0.0, 0.5, 0.0, 8],
              [0.5, 0.0, 0.0, 8]])
#Convert thicknesses into number of cells
thickCells = np.round(thickness/cellDim[2])
thickTotal = np.max(thickCells)
# Make structure block
# FIX: np.arange excludes its stop value, so arange(0, numUC-1) generated one
# unit cell fewer than requested per axis (MATLAB 0:(n-1) port bug).
[xa,ya,za] = np.meshgrid(np.arange(0,numUC[0]), np.arange(0,numUC[1]), 1)
# FIX: np.product was removed in NumPy 2.0; np.prod is the supported name.
xxa = np.reshape(xa, (1,np.prod(xa.shape))).transpose()
yya = np.reshape(ya, (1,np.prod(ya.shape))).transpose()
zza = np.reshape(za, (1,np.prod(za.shape))).transpose()
p = np.squeeze(np.array([xxa, yya, zza, np.zeros(((xa.size),1))]))
p= p.transpose()
# replicate every basis site `b` into every tiled cell origin `p`
[pa,ba] = np.meshgrid(np.arange(0, np.size(p, axis=0)), np.arange(0, np.size(b, axis=0)))
atoms = p[pa[:],] + b[ba[:],]
atoms = atoms.reshape((atoms.shape[0]*atoms.shape[1]), atoms.shape[2])
# scale fractional coordinates to Angstroms using the unit-cell dimensions
atoms[:,0] = atoms[:,0]*cellDim[0]
atoms[:,1] = atoms[:,1]*cellDim[1]
atoms[:,2] = atoms[:,2]*cellDim[2]
# +
import plotly.graph_objects as go
import numpy as np
# Interactive 3D scatter of the generated atomic coordinates.
# Data for three-dimensional scattered points
zdata = atoms[:,2]
xdata = atoms[:,0]
ydata = atoms[:,1]
fig = go.Figure(data=[go.Scatter3d(x=xdata, y=ydata, z=zdata,
                                   mode='markers',
                                   marker=dict(size=12,
                                               colorscale='Viridis', # choose a colorscale
                                               opacity=0.8))])
fig.show()
# -
# ### Simulation Inputs
#
# +
#
# number of frozen-phonon configurations to average over
numFP =1
#Flag to plot projected potentials
f_plotPot = 1
#Probe positions
Nprobes = np.array([1,1])*8
# xp = np.linspace(0,cellDim[0]*1, num = Nprobes[0])
# xp[-1] = 0
# yp = np.linspace(0,cellDim[1]*1, num = Nprobes[1])
# yp[-1] = 0
# for testing use single probe
xp = [0,1]
yp = [0,1]
# -
# ### Microscope parameters
# +
#Approximate pixel size - if cell is rectangular, pixel size in x and y will not be identical
pSize = 0.05*2
potBound = 1.25
df = 0 #focus on incident surface
# Probe Aberrations (spherical aberration coefficients, in Angstroms)
C3 = -.000 * 10**7
C5 = 0.0 * 10**7
#illumination angle in mrads
alphaMax = 10/1000;
#Microscope voltage (volts)
E0 = 120*(10**3)
#Calculate wavelength and electron interaction parameter
# (SI constants: electron mass, charge, speed of light, Planck)
m = 9.109383*10**-31
e = 1.602177*10**-19
c = 299792458
h = 6.62607*10**-34
#wavelength in A (relativistically corrected de Broglie wavelength)
lamb = h/np.sqrt(2*m*e*E0)/np.sqrt(1 + e*E0/2/m/c**2)*10**10
s = (2*np.pi/lamb/E0)*(m*c**2+e*E0)/(2*m*(c**2)+e*E0)
# number of slices per cell defined using the z direction
dz = cellDim[2]/2;
# -
# ### Prepare RealSpace coordinate system
# +
#Make sure even number of pixels per cell
Nx = np.ceil(numUC[0]*cellDim[0]/pSize/2)*2
Ny = np.ceil(numUC[1]*cellDim[1]/pSize/2)*2
xSize = numUC[0]*cellDim[0]/Nx
# FIX: the y pixel size must divide by Ny (was /Nx, which is only correct for
# square cells — the comment above even warns about rectangular cells).
ySize = numUC[1]*cellDim[1]/Ny
xySize = np.array([xSize,ySize]) # this is the pixelsize in realspace
# -
# ### Make Fourier coordinate system
# +
Lx = Nx*xSize
Ly = Ny*ySize
# FIX: MATLAB (-N/2):(N/2-1) must be arange(-N/2, N/2) in Python (arange
# excludes the stop), otherwise the frequency axis is one sample short; the
# roll shift must also be a scalar (a tuple shift with axis=None is invalid).
qx = np.roll(np.arange(-Nx/2, Nx/2)/Lx, -int(Nx/2))
qy = np.roll(np.arange(-Ny/2, Ny/2)/Ly, -int(Ny/2))
[qya, qxa] = np.meshgrid(qy,qx);
# FIX: q2 is the elementwise squared spatial frequency; np.dot computed a
# matrix product here instead of |q|^2.
q2 = qxa**2 + qya**2
# q4 = q2**2
# q6 = q2*q4
q1 = np.sqrt(abs(q2))
# -
# ### Make propagators and anti aliasing aperture AA
# Smooth-edged anti-aliasing aperture: 1 inside, 0 outside, linear ramp at
# the band limit.
dq = qx[1]-qx[0]
Adist = 2*(np.max(qx)/2 - q1)/dq
# FIX: AA was assigned Adist twice and then clipped in place, silently
# mutating Adist through the alias; clip into a fresh array instead.
AA = np.clip(Adist, 0, 1)
# ### Propagator
# Fresnel free-space propagator for one slice of thickness dz, band-limited
# by the anti-aliasing aperture.
zint = 1j
prop = np.exp(-zint*np.pi*lamb*dz*q2)*AA
# ### Make probe components
# +
# Maximum spatial frequency admitted by the probe-forming aperture.
qMax = alphaMax/lamb
# Aberration phase function (defocus term only; C3/C5 variant kept below).
chiProbe = (2*np.pi/lamb)*((1/2)*(lamb**2)*q2*df)
# chiProbe = (2*np.pi/lamb)*((1/2)*(lamb**2)*q2*df + (1/4)*(lamb**4)*q4*C3 + (1/6)*(lamb**6)*q6*C5)
# -
chiProbe.shape
# ### Construct projected potentials
# fparams.mat holds Kirkland's 12-parameter scattering-factor fits per element.
fparams = loadmat('fparams.mat')
# +
# NOTE(review): this cell is scratch work for the projPot() function defined
# below and is broken as written: it uses xr/yr before they are defined (they
# are created in a later cell), the `+ term2*(...)` lines are dangling no-op
# expression statements (the previous statement's parentheses close first),
# `pi` comes from the mpmath star-import, and the potMid index arange is
# empty. Prefer projPot() below.
ap = fparams['fparams'][37,:]
#Super sampling for potential integration (should be even!!)
ss = 2
#Constants
a0 = 0.5292
e = 14.4
term1 = 2*np.pi**2*a0*e
term2 = 2*pi**(5/2)*a0*e
#Make supersampled 2D grid for integration
dx = (xr[1] - xr[0])
dy = (yr[1] - yr[0])
sub = np.arange((-(ss-1)/ss/2),((ss-1)/ss/2),(1/ss))
#sub = (-(ss-1)/ss/2):(1/ss):((ss-1)/ss/2)
[x1,x2] = np.meshgrid(xr,sub*dx)
xv = x1[:] + x2[:]
[y1,y2] = np.meshgrid(yr,sub*dy)
yv = y1[:] + y2[:]
[ya,xa] = np.meshgrid(yv,xv)
r2 = xa**2 + ya**2
r = np.sqrt(r2)
potSS = term1*(ap[0]* kn(0,2*np.pi*np.sqrt(ap[1])*r)+ ap[2]*kn(0,2*np.pi*np.sqrt(ap[3]*r))+ ap[4]*kn(0,2*np.pi*np.sqrt(ap[5]*r)))
+ term2*(ap[6]/ap[7]*np.exp((-np.pi**2)/ap[7]*r2)
+ ap[8]/ap[9]*np.exp((-np.pi**2)/ap[9]*r2)
+ ap[10]/ap[11]*np.exp((-np.pi**2)/ap[11]*r2))
potMid = np.zeros((len(xr),len(yr)))
for a0 in np.arange(0,ss):
    potMid = potMid + potSS[(np.arange(a0,(-1+a0-ss),ss)),:]
# pot = zeros(length(xr),length(yr))
# for a0 = 1:ss:
#     pot = pot + potMid(:,a0:ss:(end+a0-ss))
# pot = pot / ss^2;
# -
def projPot(fparams,atomID,xr,yr):
    """Projected atomic potential of element `atomID` on the grid xr (x) by yr (y).

    Uses Kirkland's 12-parameter fit: three modified-Bessel (Lorentzian) terms
    plus three Gaussian terms, evaluated on an ss-times supersampled grid and
    box-averaged back down to the requested pixel grid.
    Returns an array of shape (len(xr), len(yr)).

    FIX: the original returned an undefined `pot` (NameError), left the
    Gaussian terms as dangling no-op statements (the `+` lines started new
    expressions), used sqrt(ap*r) where sqrt(ap)*r was intended, and the
    supersample reduction loop indexed with an empty arange.
    """
    #Super sampling for potential integration (should be even!!)
    ss = 4
    #Constants: Bohr radius (A) and e^2/(4*pi*eps0) in V*A
    a0 = 0.5292
    e = 14.4
    term1 = 2*np.pi**2*a0*e
    term2 = 2*np.pi**(5/2)*a0*e
    #Make supersampled 2D grid for integration
    dx = (xr[1] - xr[0])
    dy = (yr[1] - yr[0])
    # ss symmetric sub-pixel offsets, in units of one pixel
    sub = (np.arange(ss) - (ss - 1) / 2) / ss
    # supersampled sample coordinates along each axis, grouped per pixel
    xv = (xr[:, None] + sub[None, :]*dx).ravel()
    yv = (yr[:, None] + sub[None, :]*dy).ravel()
    [ya, xa] = np.meshgrid(yv, xv)
    r2 = xa**2 + ya**2
    r = np.sqrt(r2)
    #Compute potential from the 12 fitted parameters of this element
    ap = fparams['fparams'][atomID,:]
    potSS = term1*(ap[0]*kn(0, 2*np.pi*np.sqrt(ap[1])*r)
                   + ap[2]*kn(0, 2*np.pi*np.sqrt(ap[3])*r)
                   + ap[4]*kn(0, 2*np.pi*np.sqrt(ap[5])*r)) \
        + term2*(ap[6]/ap[7]*np.exp((-np.pi**2)/ap[7]*r2)
                 + ap[8]/ap[9]*np.exp((-np.pi**2)/ap[9]*r2)
                 + ap[10]/ap[11]*np.exp((-np.pi**2)/ap[11]*r2))
    #Integrate: average the ss x ss supersamples belonging to each pixel
    pot = potSS.reshape(len(xr), ss, len(yr), ss).mean(axis=(1, 3))
    return pot
# +
# potSr = projPot(fparams,37,xr,yr)
# +
# Local real-space grid (in pixels) on which each atom's potential is sampled,
# extending potBound Angstroms from the atom center.
xyLeng = np.ceil(potBound/xySize)
xvec = np.arange(-xyLeng[0]+1,xyLeng[0])
yvec = np.arange(-xyLeng[1]+1,xyLeng[1])
xr = xvec*xySize[0]
yr = yvec*xySize[1]
# potSr = projPot(38,xr,yr);
# potTi = projPot(22,xr,yr);
# potO = projPot(8,xr,yr);
# -
# ### Preparing detector
## Keep only center portion of detector
dx = np.round(Nx/4);
dy = np.round(Ny/4);
# NOTE(review): the detector index vectors below are unported MATLAB and are
# still commented out; the main loop references xvecDet/yvecDet.
# xvecDet = np.array([np.arange(0,dx), np.arange(Nx+((-Nx/2 +1 +dx),0))]);
# yvecDet = [1:dy Ny+((-Nx/2+1+dx):0)]
# ### Preparing slices
# Divide up atoms into slices: slice index per atom from its z coordinate
zVals = np.round((atoms[:,2] - min(atoms[:,2]))/dz +.5)
zPlanes = np.sort(np.unique(zVals))
Nx/2
# ### Main loops
# Output arrays: detector images per probe position/thickness, and the
# integrated intensity vs depth.
data = np.zeros((int(Nx/2), int(Ny/2), np.size(xp), np.size(yp), np.size(int(thickCells))))
intData = np.zeros((np.max(int(thickCells)),2))
data.shape, intData.shape
# +
#preparing to plot potentials
if f_plotPot == 1:
    potSum = np.zeros((int(Nx),int(Ny)))
# -
# Initialize the probe wavefunction (in Fourier space) for each frozen-phonon
# configuration; the aberration phase and aperture define the probe.
for a1 in np.arange(0,numFP):
    print(a1)
    # FIX: np.complex_ was removed in NumPy 2.0; np.complex128 is the
    # supported equivalent.
    psi = np.zeros((int(Nx)-1,int(Ny)-1,np.size(xp), np.size(yp)),dtype=np.complex128)
    probefft = np.exp(-1j*chiProbe-2*np.pi*1j*(qxa*(xp[0])+ qya*(yp[0])))*AA
    # normalize to unit total intensity
    probefft = probefft/np.sqrt(np.sum(np.sum(abs(probefft)**2)))
    psi[:,:,0,0] = probefft;
psi.shape,chiProbe.shape
# +
# NOTE(review): everything below the probe initialization in this cell is
# unported MATLAB (colon loop headers, parenthesis indexing, mod/randn/fft2,
# emdSTEM struct fields) and is NOT valid Python — it will raise SyntaxError
# if executed. It is kept as a porting reference for the multislice loop:
# for each frozen-phonon pass, build each slice potential with random thermal
# displacements, apply the transmission function, propagate, and accumulate
# detector intensities.
# first loop over numFP
for a1 in np.arange(0,numFP):
    #Initialize probes
    psi = np.zeros((int(Nx)-1,int(Ny)-1,np.size(xp), np.size(yp)),dtype=np.complex_)
    for a2 in np.arange(0,np.size(xp)):
        for a3 in np.arange(0,np.size(yp)):
            probefft = np.exp(-1j*chiProbe-2*np.pi*1j*(qxa*(xp[0])+ qya*(yp[0])))*AA
            probefft = probefft/np.sqrt(np.sum(np.sum(abs(probefft)**2)))
            psi[:,:,0,0] = probefft
    #Propagate through whole foil
    for a2 = 1:max(thickCells)
        aSub = atoms(zVals==zPlanes(mod(a2-1,length(zPlanes))+1),:)
        #Generate slice potential
        pot = np.zeros((int(Nx),int(Ny)))
        for a3 = 1:size(aSub,1)
            if aSub(a3,4) == 8:
                x = mod(xvec+round((aSub(a3,1)+randn*uO)/xySize(1)),Nx)+1;
                y = mod(yvec+round((aSub(a3,2)+randn*uO)/xySize(2)),Ny)+1;
                pot(x,y) = pot(x,y) + potO
            elif aSub(a3,4) == 22:
                x = mod(xvec+round((aSub(a3,1)+randn*uTi)/xySize(1)),Nx)+1;
                y = mod(yvec+round((aSub(a3,2)+randn*uTi)/xySize(2)),Ny)+1;
                pot(x,y) = pot(x,y) + potTi
            elif aSub(a3,4) == 38:
                x = mod(xvec+round((aSub(a3,1)+randn*uSr)/xySize(1)),Nx)+1;
                y = mod(yvec+round((aSub(a3,2)+randn*uSr)/xySize(2)),Ny)+1;
                pot(x,y) = pot(x,y) + potSr;
        if f_plotPot == 1:
            potSum = potSum + pot
        trans = exp(1i*s*pot)
        for a3 = 1:length(xp):
            for a4 = 1:length(yp):
                psi(:,:,a3,a4) = fft2(ifft2(psi(:,:,a3,a4))*trans)*prop
        ## Integrated intensity
        intData(a2,:) = emdSTEM.intData(a2,:)+ [dz*a3 sum(abs(psi(:).^2))/length(xp)/length(yp)]
        ## Output results
        [val,ind] = min(abs(thickCells-a2))
        if val == 0:
            for a3 = 1:length(xp)
                for a4 = 1:length(yp)
                    data(:,:,a3,a4,ind,a0) = data(:,:,a3,a4,ind,a0) + abs(psi(xvecDet,yvecDet,a3,a4))**2
data = data/numFP;
emdSTEM.intData = emdSTEM.intData / numFP / length(compArray);
## Rescale coordinates for antialiasing scale
xySize = emdSTEM.xySize*2
if f_plotPot == 1:
    emdSTEM.potSum = potSum;
# figure(1)
# clf
# imagesc(potSum/numFP)
# axis equal off
# colormap(hot(256))
# set(gca,'position',[0 0 1 1])
# -
# ************************************************************************
| Week6/.ipynb_checkpoints/01_sSim-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/felipemoreia/acelaradev-codenation/blob/master/principal_components_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="DIhqRYp2Y3m0" colab_type="text"
# # Desafio 5
#
# Neste desafio, vamos praticar sobre redução de dimensionalidade com PCA e seleção de variáveis com RFE. Utilizaremos o _data set_ [Fifa 2019](https://www.kaggle.com/karangadiya/fifa19), contendo originalmente 89 variáveis de mais de 18 mil jogadores do _game_ FIFA 2019.
#
# > Obs.: Por favor, não modifique o nome das funções de resposta.
# + [markdown] id="_WnQWZ7UY3m1" colab_type="text"
# ## _Setup_ geral
# + id="KrdvXw4FY3m2" colab_type="code" colab={}
from math import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
import statsmodels.stats as st
from sklearn.decomposition import PCA
from loguru import logger
# + id="aQGdHv09Y3m5" colab_type="code" colab={}
# Algumas configurações para o matplotlib.
# #%matplotlib inline
#from IPython.core.pylabtools import figsize
#figsize(12, 8)
#sns.set()
# + id="tORr8RVvY3m8" colab_type="code" colab={}
fifa = pd.read_csv("fifa.csv")
# + id="m5yCKJ2KY3m-" colab_type="code" colab={} outputId="a78caa11-5d9a-42d4-967c-4e15f9581adb"
fifa.head()
# + id="89yhy-mOY3nB" colab_type="code" colab={}
columns_to_drop = ["Unnamed: 0", "ID", "Name", "Photo", "Nationality", "Flag",
"Club", "Club Logo", "Value", "Wage", "Special", "Preferred Foot",
"International Reputation", "Weak Foot", "Skill Moves", "Work Rate",
"Body Type", "Real Face", "Position", "Jersey Number", "Joined",
"Loaned From", "Contract Valid Until", "Height", "Weight", "LS",
"ST", "RS", "LW", "LF", "CF", "RF", "RW", "LAM", "CAM", "RAM", "LM",
"LCM", "CM", "RCM", "RM", "LWB", "LDM", "CDM", "RDM", "RWB", "LB", "LCB",
"CB", "RCB", "RB", "Release Clause"
]
try:
fifa.drop(columns_to_drop, axis=1, inplace=True)
except KeyError:
logger.warning(f"Columns already dropped")
# + [markdown] id="hO-5iK_QY3nH" colab_type="text"
# ## Inicia sua análise a partir daqui
# + id="booG6ai5Y3nI" colab_type="code" colab={} outputId="29de167f-f62b-4acc-cb82-7592001d38f7"
fifa.shape
# + id="6XOis3WNY3nM" colab_type="code" colab={} outputId="195037e0-0e40-49b4-d30a-9f1271f9344f"
# Sua análise começa aqui.
fifa.info()
# + id="x7htCefpY3nP" colab_type="code" colab={}
# + [markdown] id="O3SO0pJZY3nS" colab_type="text"
# ## Questão 1
#
# Qual fração da variância consegue ser explicada pelo primeiro componente principal de `fifa`? Responda como um único float (entre 0 e 1) arredondado para três casas decimais.
# + id="93I1w_l5Y3nS" colab_type="code" colab={}
def q1():
    """Return the fraction of total variance explained by the first
    principal component of `fifa`, rounded to three decimals."""
    model = PCA()
    model.fit(fifa.dropna())
    leading_ratio = model.explained_variance_ratio_[0]
    return float(round(leading_ratio, 3))
# + id="a2DIRW9DY3nV" colab_type="code" colab={} outputId="9e42a07a-e8c7-4ab9-c285-ab579e7f9e7a"
q1()
# + [markdown] id="NuNBSAI7Y3nY" colab_type="text"
# ## Questão 2
#
# Quantos componentes principais precisamos para explicar 95% da variância total? Responda como um único escalar inteiro.
# + id="22xGtm_8Y3nZ" colab_type="code" colab={}
def q2():
    """Return how many principal components are needed to explain 95% of
    the total variance of `fifa`."""
    model = PCA(n_components=0.95)
    model.fit(fifa.dropna())
    return int(model.n_components_)
# + id="ZYsz659GY3nc" colab_type="code" colab={} outputId="f5b71ff7-ce81-45c8-b9bb-6ca37b8593f3"
q2()
# + [markdown] id="gNshPlvEY3ne" colab_type="text"
# ## Questão 3
#
# Quais são as coordenadas (primeiro e segundo componentes principais) do ponto `x` abaixo? O vetor abaixo já está centralizado. Cuidado para __não__ centralizar o vetor novamente (por exemplo, invocando `PCA.transform()` nele). Responda como uma tupla de float arredondados para três casas decimais.
# + id="6PPKE6OAY3ne" colab_type="code" colab={}
x = [0.87747123, -1.24990363, -1.3191255, -36.7341814,
-35.55091139, -37.29814417, -28.68671182, -30.90902583,
-42.37100061, -32.17082438, -28.86315326, -22.71193348,
-38.36945867, -20.61407566, -22.72696734, -25.50360703,
2.16339005, -27.96657305, -33.46004736, -5.08943224,
-30.21994603, 3.68803348, -36.10997302, -30.86899058,
-22.69827634, -37.95847789, -22.40090313, -30.54859849,
-26.64827358, -19.28162344, -34.69783578, -34.6614351,
48.38377664, 47.60840355, 45.76793876, 44.61110193,
49.28911284
]
# + id="v6lN3jgGY3nh" colab_type="code" colab={}
def q3():
    """Return the first two principal-component coordinates of the
    pre-centered point `x`, as a tuple rounded to three decimals."""
    model = PCA(n_components=2)
    model.fit(fifa.dropna())
    # `x` is already centered, so project it directly onto the components
    # instead of calling transform() (which would subtract the mean again).
    first, second = model.components_ @ np.asarray(x)
    return (round(first, 3), round(second, 3))
# + id="Dp-mq5UNY3nk" colab_type="code" colab={} outputId="93ac1b6a-08d5-46e2-ea1a-e01283ed4bab"
q3()
# + [markdown] id="UcV4yFJ2Y3nm" colab_type="text"
# ## Questão 4
#
# Realize RFE com estimador de regressão linear para selecionar cinco variáveis, eliminando uma a uma. Quais são as variáveis selecionadas? Responda como uma lista de nomes de variáveis.
# + id="2TwJ3ZhzY3nn" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
# + id="Ai7lvNlcY3np" colab_type="code" colab={}
df_fifa = fifa.dropna()
# + id="OAHSNliAY3nr" colab_type="code" colab={}
def q4():
    """Select five features with RFE (linear-regression estimator,
    eliminating one feature per step) and return their names."""
    predictors = df_fifa.drop(columns='Overall', axis=1)
    target = df_fifa['Overall']
    selector = RFE(LinearRegression(), n_features_to_select=5)
    selector.fit(predictors, target)
    return list(predictors.columns[selector.support_])
# + id="ALLWHnMNY3nu" colab_type="code" colab={} outputId="eeff1eb5-3cc1-4848-e75b-fbb09eeebcfb"
q4()
# + id="O6xJAwS6Y3nw" colab_type="code" colab={}
| principal_components_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from bs4 import BeautifulSoup
import string
import requests
from fake_useragent import UserAgent
ua = UserAgent()
# -
# Translation table built once at import time instead of on every call;
# str.maketrans('', '', chars) is the idiomatic "delete these chars" form.
_PUNCT_TABLE = str.maketrans('', '', string.punctuation)

def remove_punctuation(x):
    """Return `x` as a string with all ASCII punctuation removed.

    Non-string inputs are coerced with str() first, matching the
    original behavior.
    """
    return str(x).translate(_PUNCT_TABLE)
def get_soup(url, timeout=5):
    """Fetch `url` and return it parsed as a BeautifulSoup document.

    Retries on a non-OK HTTP status up to `timeout` times. On a network
    failure or when the retry budget is exhausted, returns an EMPTY soup
    so callers can keep calling `.find(...)` without crashing.

    Parameters
    ----------
    url : str
        Fully-qualified URL to fetch.
    timeout : int
        Maximum number of retry attempts (an attempt count, not seconds).

    Returns
    -------
    bs4.BeautifulSoup
    """
    headers = {'User-Agent': ua.random}
    try:
        response = requests.get(url, headers=headers)
    except requests.RequestException:
        # Fix: the original returned 0 here, which crashed callers that
        # immediately call soup.find(...). Return an empty soup instead,
        # consistent with the retry-exhausted path below.
        print("FAILED " + url)
        return BeautifulSoup('', 'lxml')
    attempts = 0
    while not response.ok:
        print(url + ' failed with code: ' + str(response.status_code))
        if attempts > timeout:
            return BeautifulSoup('', 'lxml')
        # Fix: retry WITH the randomized User-Agent header; the original
        # retried bare, defeating the purpose of fake_useragent.
        response = requests.get(url, headers=headers)
        attempts += 1
    return BeautifulSoup(response.text, 'lxml')
url = 'http://www.beeradvocate.com/beer/style/'
soup = get_soup(url)
beer_styles = {}
for style in soup.find('table').find_all('a'):
beer_styles[style.get_text()] = style['href']
url = beer_styles.items()[0][1]
ba_url = 'http://www.beeradvocate.com'
soup = get_soup(ba_url+url)
# +
beers = {}
ba_url = 'http://www.beeradvocate.com'
for style in beer_styles.items():
style_url = style[1]
soup = get_soup(ba_url+style_url)
for beer_row in soup.find_all('tr')[3:-1]:
beer_link = beer_row.find(class_ = 'hr_bottom_light').find('a')
beers[beer_link.get_text()] = beer_link['href']
# -
ba_url = 'http://www.beeradvocate.com'
def get_beer_reviews(soup):
    """Extract the plain-text review bodies from a parsed beer page.

    Strips the <span> metadata (scores, rDev, user info) out of each
    rating block and returns the remaining free-text reviews.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
        Parsed beer page.

    Returns
    -------
    list of str
    """
    reviews = []
    for rating in soup.find_all(id='rating_fullview_content_2'):
        # Drop inline metadata spans so only the review text remains.
        for span in rating.find_all('span'):
            span.extract()
        # Fix: work on text, not utf-8 bytes. Under Python 3 the original
        # bytes.replace('rDev', '') raised TypeError and str(bytes) produced
        # a "b'...'" repr; plain unicode is correct on both 2 and 3.
        review = rating.get_text().strip().replace('rDev', '')
        reviews.append(review)
    return reviews
# +
beer_reviews = {}
ba_url = 'http://www.beeradvocate.com'
for beer in beers.items():
soup = get_soup(ba_url+beer[1])
print(beer[0])
beer_reviews[beer[0]] = get_beer_reviews(soup)
# -
beers
import pickle
pickle.dump(beer_reviews,open('beer_reviews.pkl','wb'))
write_dict_file = open('beers.pkl','wb')
pickle.dump(beers,write_dict_file)
write_dict_file.close()
pickle.dump(beer_styles,open('beer_styles.pkl','wb'))
df.apply
| scrape-from-beer-advocate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 03 - Columnar Vs Row Storage
# - The columnar storage extension used here:
# - cstore_fdw by citus_data [https://github.com/citusdata/cstore_fdw](https://github.com/citusdata/cstore_fdw)
# - The data tables are the ones used by citus_data to show the storage extension
#
# %load_ext sql
# ## STEP 0 : Connect to the local database where Pagila is loaded
#
# ### Create the database
# +
# !sudo -u postgres psql -c 'CREATE DATABASE reviews;'
# !wget http://examples.citusdata.com/customer_reviews_1998.csv.gz
# !wget http://examples.citusdata.com/customer_reviews_1999.csv.gz
# !gzip -d customer_reviews_1998.csv.gz
# !gzip -d customer_reviews_1999.csv.gz
# !mv customer_reviews_1998.csv /tmp/customer_reviews_1998.csv
# !mv customer_reviews_1999.csv /tmp/customer_reviews_1999.csv
# -
# ### Connect to the database
# +
DB_ENDPOINT = "127.0.0.1"
DB = 'reviews'
DB_USER = 'student'
DB_PASSWORD = '<PASSWORD>'
DB_PORT = '5432'
# postgresql://username:password@host:port/database
conn_string = "postgresql://{}:{}@{}:{}/{}" \
.format(DB_USER, DB_PASSWORD, DB_ENDPOINT, DB_PORT, DB)
print(conn_string)
# -
# %sql $conn_string
# ## STEP 1 : Create a table with a normal (Row) storage & load data
#
# **TODO:** Create a table called customer_reviews_row with the column names contained in the `customer_reviews_1998.csv` and `customer_reviews_1999.csv` files.
# + language="sql"
# DROP TABLE IF EXISTS customer_reviews_row;
# CREATE TABLE customer_reviews_row
# (
# customer_id TEXT,
# review_date DATE,
# review_rating INTEGER,
# review_votes INTEGER,
# review_helpful_votes INTEGER,
# product_id CHAR(10),
# product_title TEXT,
# product_sales_rank BIGINT,
# product_group TEXT,
# product_category TEXT,
# product_subcategory TEXT,
# similar_product_ids CHAR(10) []
# )
# -
# **TODO:** Use the [COPY statement](https://www.postgresql.org/docs/9.2/sql-copy.html) to populate the tables with the data in the `customer_reviews_1998.csv` and `customer_reviews_1999.csv` files. You can access the files in the `/tmp/` folder.
# + language="sql"
# COPY customer_reviews_row FROM '/tmp/customer_reviews_1998.csv' WITH CSV;
# COPY customer_reviews_row FROM '/tmp/customer_reviews_1999.csv' WITH CSV;
# -
# ## STEP 2 : Create a table with columnar storage & load data
#
# First, load the extension to use columnar storage in Postgres.
# + language="sql"
#
# -- load extension first time after install
# CREATE EXTENSION cstore_fdw;
#
# -- create server object
# CREATE SERVER cstore_server FOREIGN DATA WRAPPER cstore_fdw;
# -
# **TODO:** Create a `FOREIGN TABLE` called `customer_reviews_col` with the column names contained in the `customer_reviews_1998.csv` and `customer_reviews_1999.csv` files.
# + language="sql"
# -- create foreign table
# DROP FOREIGN TABLE IF EXISTS customer_reviews_col;
#
# -------------
# CREATE FOREIGN TABLE customer_reviews_col
# (
# customer_id TEXT,
# review_date DATE,
# review_rating INTEGER,
# review_votes INTEGER,
# review_helpful_votes INTEGER,
# product_id CHAR(10),
# product_title TEXT,
# product_sales_rank BIGINT,
# product_group TEXT,
# product_category TEXT,
# product_subcategory TEXT,
# similar_product_ids CHAR(10) []
# )
#
#
# -------------
# -- leave code below as is
# SERVER cstore_server
# OPTIONS(compression 'pglz');
# -
# **TODO:** Use the [COPY statement](https://www.postgresql.org/docs/9.2/sql-copy.html) to populate the tables with the data in the `customer_reviews_1998.csv` and `customer_reviews_1999.csv` files. You can access the files in the `/tmp/` folder.
# + language="sql"
# COPY customer_reviews_col FROM '/tmp/customer_reviews_1998.csv' WITH CSV;
# COPY customer_reviews_col FROM '/tmp/customer_reviews_1999.csv' WITH CSV;
# -
# ## Step 3: Compare performance
#
# Now run the same query on the two tables and compare the run time. Which form of storage is more performant?
#
# **TODO**: Write a query that calculates the average `review_rating` by `product_title` for all reviews in 1995. Sort the data by `review_rating` in descending order. Limit the results to 20.
#
# First run the query on `customer_reviews_row`:
# +
# %%time
# %%sql
SELECT product_title, review_date, AVG(review_rating) as average
FROM customer_reviews_row
WHERE review_date >= '1995-01-01' AND
review_date <= '1995-12-31'
GROUP BY (product_title, review_date)
ORDER BY average DESC
LIMIT 20
# -
# Then on `customer_reviews_col`:
# +
# %%time
# %%sql
SELECT product_title, review_date, AVG(review_rating) as average
FROM customer_reviews_col
WHERE review_date >= '1995-01-01' AND
review_date <= '1995-12-31'
GROUP BY (product_title, review_date)
ORDER BY average DESC
LIMIT 20
# -
# ## Conclusion: We can see that the columnar storage is faster!
| course_materials/project_03_data_warehouses/L1 E3 - Columnar Vs Row Storage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf-gpu-2.0
# language: python
# name: tf_gpu2
# ---
# + active=""
# 1 - Introduction to Triggers
# -
# +
## Creating your first trigger
-- Create a new trigger that fires when deleting data
CREATE TRIGGER PreventDiscountsDelete
ON Discounts
-- The trigger should fire instead of DELETE
INSTEAD OF DELETE
AS
PRINT 'You are not allowed to delete data from the Discounts table.';
# -
# +
## Practicing creating triggers
-- Set up a new trigger
CREATE TRIGGER OrdersUpdatedRows
ON Orders
-- The trigger should fire after UPDATE statements
AFTER UPDATE
-- Add the AS keyword before the trigger body
AS
-- Insert details about the changes to a dedicated table
INSERT INTO OrdersUpdate(OrderID, OrderDate, ModifyDate)
SELECT OrderID, OrderDate, GETDATE()
FROM inserted;
# -
# +
## Creating a trigger to keep track of data changes
-- Create a new trigger
CREATE TRIGGER ProductsNewItems
ON Products
AFTER INSERT
AS
-- Add details to the history table
INSERT INTO ProductsHistory(Product, Price, Currency, FirstAdded)
SELECT Product, Price, Currency, GETDATE()
FROM inserted;
# -
# +
## Triggers vs. stored procedures
-- Run an update for some of the discounts
UPDATE Discounts
SET Discount = Discount + 1
WHERE Discount <= 5;
-- Verify the trigger ran successfully
SELECT * FROM DiscountsHistory;
_________________________________
UPDATE Discounts SET Discount = Discount + 1 WHERE Discount <= 5
# -
# +
## Triggers vs. computed columns
-- Add the following rows to the table
INSERT INTO SalesWithPrice (Customer, Product, Price, Currency, Quantity)
VALUES ('Fruit Mag', 'Pomelo', 1.12, 'USD', 200),
('VitaFruit', 'Avocado', 2.67, 'USD', 400),
('Tasty Fruits', 'Blackcurrant', 2.32, 'USD', 1100),
('Health Mag', 'Kiwi', 1.42, 'USD', 100),
('eShop', 'Plum', 1.1, 'USD', 500);
-- Verify the results after adding the new rows
SELECT * FROM SalesWithPrice;
__________________________________________________________
-- Add the following rows to the table
INSERT INTO SalesWithoutPrice (Customer, Product, Currency, Quantity)
VALUES ('Fruit Mag', 'Pomelo', 'USD', 200),
('VitaFruit', 'Avocado', 'USD', 400),
('Tasty Fruits', 'Blackcurrant', 'USD', 1100),
('Health Mag', 'Kiwi', 'USD', 100),
('eShop', 'Plum', 'USD', 500);
-- Verify the results after the INSERT
SELECT * FROM SalesWithoutPrice;
# -
# + active=""
# 2 - Classification of Triggers
# -
# +
## Tracking retired products
-- Create the trigger
CREATE TRIGGER TrackRetiredProducts
ON Products
AFTER DELETE
AS
INSERT INTO RetiredProducts (Product, Measure)
SELECT Product, Measure
FROM deleted;
# -
# +
## The TrackRetiredProducts trigger in action
-- Remove the products that will be retired
DELETE FROM Products
WHERE Product IN ('Cloudberry', 'Guava', 'Nance', 'Yuzu');
-- Verify the output of the history table
SELECT * FROM RetiredProducts;
# -
# +
## Practicing with AFTER triggers
-- Create a new trigger for canceled orders
CREATE TRIGGER KeepCanceledOrders
ON Orders
AFTER UPDATE
AS
INSERT INTO CanceledOrders
SELECT * FROM deleted;
_____________________________________________
-- Create a new trigger to keep track of discounts
CREATE TRIGGER CustomerDiscountHistory
ON Discounts
AFTER UPDATE
AS
-- Store old and new values into the `DiscountsHistory` table
INSERT INTO DiscountsHistory (Customer, OldDiscount, NewDiscount, ChangeDate)
SELECT i.Customer, d.Discount, i.Discount, GETDATE()
FROM inserted AS i
INNER JOIN deleted AS d ON i.Customer = d.Customer;
________________________________________________
-- Notify the Sales team of new orders
CREATE TRIGGER NewOrderAlert
ON Orders
AFTER UPDATE
AS
EXECUTE SendEmailtoSales;
# -
# +
## Preventing changes to orders
-- Create the trigger
CREATE TRIGGER PreventOrdersUpdate
ON Orders
INSTEAD OF UPDATE
AS
RAISERROR ('Updates on "Orders" table are not permitted.
Place a new order to add new products.', 16, 1);
_____________________________________________________
## PreventOrdersUpdate in action
UPDATE Orders SET Quantity = 700 WHERE OrderID = 425;
# -
# +
## Creating the PreventNewDiscounts trigger
-- Create a new trigger
CREATE TRIGGER PreventNewDiscounts
ON Discounts
INSTEAD OF INSERT
AS
RAISERROR ('You are not allowed to add discounts for existing customers.
Contact the Sales Manager for more details.', 16, 1);
# -
# +
## Tracking Table Changes
-- Create the trigger to log table info
CREATE TRIGGER TrackTableChanges
ON DATABASE
FOR CREATE_TABLE,
ALTER_TABLE,
DROP_TABLE
AS
INSERT INTO TablesChangeLog (EventData, ChangedBy)
VALUES (EVENTDATA(), USER);
# -
# +
## Preventing table deletion
-- Add a trigger to disable the removal of tables.
-- Fix: the trigger must fire on DROP_TABLE (table removal), not CREATE_TABLE;
-- firing on CREATE_TABLE would block table creation and never catch a DROP,
-- contradicting the trigger's name and its error message.
CREATE TRIGGER PreventTableDeletion
ON DATABASE
FOR DROP_TABLE
AS
RAISERROR ('You are not allowed to remove tables from this database.', 16, 1);
-- Revert the statement that removes the table
ROLLBACK;
# -
# +
## Enhancing database security
-- Save user details in the audit table
INSERT INTO ServerLogonLog (LoginName, LoginDate, SessionID, SourceIPAddress)
SELECT ORIGINAL_LOGIN(), GETDATE(), @@SPID, client_net_address
-- The user details can be found in SYS.DM_EXEC_CONNECTIONS
FROM SYS.DM_EXEC_CONNECTIONS WHERE session_id = @@SPID;
_______________________________________________
-- Create a trigger firing when users log on to the server
CREATE TRIGGER LogonAudit
-- Use ALL SERVER to create a server-level trigger
ON ALL SERVER WITH EXECUTE AS 'sa'
-- The trigger should fire after a logon
FOR LOGON
AS
-- Save user details in the audit table
INSERT INTO ServerLogonLog (LoginName, LoginDate, SessionID, SourceIPAddress)
SELECT ORIGINAL_LOGIN(), GETDATE(), @@SPID, client_net_address
FROM SYS.DM_EXEC_CONNECTIONS WHERE session_id = @@SPID;
# -
# + active=""
# 3 - Trigger Limitations and Use Cases
# -
# +
## Creating a report on existing triggers
-- Get the column that contains the trigger name
SELECT name AS TriggerName,
parent_class_desc AS TriggerType,
create_date AS CreateDate,
modify_date AS LastModifiedDate,
is_disabled AS Disabled,
-- Get the column that tells if it's an INSTEAD OF trigger
is_instead_of_trigger AS InsteadOfTrigger
FROM sys.triggers;
_______________________________________________
-- Gather information about database triggers
SELECT name AS TriggerName,
parent_class_desc AS TriggerType,
create_date AS CreateDate,
modify_date AS LastModifiedDate,
is_disabled AS Disabled,
is_instead_of_trigger AS InsteadOfTrigger,
-- Get the trigger definition by using a function
OBJECT_DEFINITION (object_id)
FROM sys.triggers
UNION ALL
-- Gather information about server triggers
SELECT name AS TriggerName,
parent_class_desc AS TriggerType,
create_date AS CreateDate,
modify_date AS LastModifiedDate,
is_disabled AS Disabled,
0 AS InsteadOfTrigger,
-- Get the trigger definition by using a function
OBJECT_DEFINITION (object_id)
FROM sys.server_triggers
ORDER BY TriggerName;
# -
# +
## Keeping a history of row changes
-- Create a trigger to keep row history
CREATE TRIGGER CopyCustomersToHistory
ON Customers
-- Fire the trigger for new and updated rows
AFTER INSERT, UPDATE
AS
INSERT INTO CustomersHistory (CustomerID, Customer, ContractID, ContractDate, Address, PhoneNo, Email, ChangeDate)
SELECT CustomerID, Customer, ContractID, ContractDate, Address, PhoneNo, Email, GETDATE()
-- Get info from the special table that keeps new rows
FROM inserted;
# -
# +
## Table auditing using triggers
-- Add a trigger that tracks table changes
CREATE TRIGGER OrdersAudit
ON Orders
AFTER INSERT, UPDATE, DELETE
AS
DECLARE @Insert BIT = 0;
DECLARE @Delete BIT = 0;
IF EXISTS (SELECT * FROM inserted) SET @Insert = 1;
IF EXISTS (SELECT * FROM deleted) SET @Delete = 1;
INSERT INTO TablesAudit (TableName, EventType, UserAccount, EventDate)
SELECT 'Orders' AS TableName
,CASE WHEN @Insert = 1 AND @Delete = 0 THEN 'INSERT'
WHEN @Insert = 1 AND @Delete = 1 THEN 'UPDATE'
WHEN @Insert = 0 AND @Delete = 1 THEN 'DELETE'
END AS Event
,ORIGINAL_LOGIN() AS UserAccount
,GETDATE() AS EventDate;
# -
# +
## Preventing changes to Products
-- Prevent any product changes
CREATE TRIGGER PreventProductChanges
ON Products
AFTER insert, UPDATE
AS
RAISERROR ('Updates of products are not permitted. Contact the database administrator if a change is needed.', 16, 1);
# -
# +
## Checking stock before placing orders
-- Create a new trigger to confirm stock before ordering
CREATE TRIGGER ConfirmStock
ON Orders
INSTEAD OF INSERT
AS
IF EXISTS (SELECT *
           FROM Products AS p
           INNER JOIN inserted AS i ON i.Product = p.Product
           WHERE p.Quantity < i.Quantity)
BEGIN
	RAISERROR ('You cannot place orders when there is no stock for the order''s product.', 16, 1);
END
ELSE
BEGIN
	-- Fix: insert the NEW rows from the virtual `inserted` table.
	-- The original selected FROM Orders, which re-inserted a copy of every
	-- existing order instead of the order being placed.
	INSERT INTO Orders (OrderID, Customer, Product, Price, Currency, Quantity, WithDiscount, Discount, OrderDate, TotalAmount, Dispatched)
	SELECT OrderID, Customer, Product, Price, Currency, Quantity, WithDiscount, Discount, OrderDate, TotalAmount, Dispatched FROM inserted;
END;
# -
# +
## Database auditing
-- Create a new trigger
CREATE TRIGGER DatabaseAudit
-- Attach the trigger at the database level
ON DATABASE
-- Fire the trigger for all tables/ views events
FOR DDL_TABLE_VIEW_EVENTS
AS
INSERT INTO DatabaseAudit (EventType, DatabaseName, SchemaName, Object, ObjectType, UserAccount, Query, EventTime)
SELECT EVENTDATA().value('(/EVENT_INSTANCE/EventType)[1]', 'NVARCHAR(50)') AS EventType
,EVENTDATA().value('(/EVENT_INSTANCE/DatabaseName)[1]', 'NVARCHAR(50)') AS DatabaseName
,EVENTDATA().value('(/EVENT_INSTANCE/SchemaName)[1]', 'NVARCHAR(50)') AS SchemaName
,EVENTDATA().value('(/EVENT_INSTANCE/ObjectName)[1]', 'NVARCHAR(100)') AS Object
,EVENTDATA().value('(/EVENT_INSTANCE/ObjectType)[1]', 'NVARCHAR(50)') AS ObjectType
,EVENTDATA().value('(/EVENT_INSTANCE/LoginName)[1]', 'NVARCHAR(100)') AS UserAccount
,EVENTDATA().value('(/EVENT_INSTANCE/TSQLCommand/CommandText)[1]', 'NVARCHAR(MAX)') AS Query
,EVENTDATA().value('(/EVENT_INSTANCE/PostTime)[1]', 'DATETIME') AS EventTime;
# -
# +
## Preventing server changes
-- Create a trigger to prevent database deletion
CREATE TRIGGER PreventDatabaseDelete
-- Attach the trigger at the server level
ON all server
FOR DROP_DATABASE
AS
PRINT 'You are not allowed to remove existing databases.';
ROLLBACK;
# -
# + active=""
# 4 - Trigger Optimization and Management
# -
# +
## Removing unwanted triggers
-- Remove the trigger
DROP TRIGGER PreventNewDiscounts;
______________________________________
-- Remove the database trigger
DROP TRIGGER PreventTableDeletion
ON DATABASE;
____________________________
-- Remove the server trigger
DROP TRIGGER DisallowLinkedServers
ON ALL SERVER;
# -
# +
## Modifying a trigger's definition
-- Fix the typo in the trigger message
ALTER TRIGGER PreventDiscountsDelete
ON Discounts
INSTEAD OF DELETE
AS
PRINT 'You are not allowed to remove data from the Discounts table.';
# -
# +
## Disabling a trigger
-- Pause the trigger execution
DISABLE TRIGGER PreventOrdersUpdate
ON Orders;
# -
# +
## Re-enabling a disabled trigger
-- Resume the trigger execution
ENABLE TRIGGER PreventOrdersUpdate
ON Orders;
# -
# +
## Managing existing triggers
-- Get the disabled triggers
SELECT name,
object_id,
parent_class_desc
FROM sys.triggers
WHERE is_disabled = 1;
_______________________
-- Check for unchanged server triggers
SELECT *
FROM sys.server_triggers
WHERE create_date = modify_date;
______________________________
-- Get the table triggers
SELECT *
FROM sys.triggers
WHERE parent_class_desc = 'DATABASE';
# -
# +
## Keeping track of trigger executions
-- Modify the trigger to add new functionality
ALTER TRIGGER PreventOrdersUpdate
ON Orders
-- Prevent any row changes
INSTEAD OF UPDATE
AS
-- Keep history of trigger executions
INSERT INTO TriggerAudit (TriggerName, ExecutionDate)
SELECT 'PreventOrdersUpdate',
GETDATE();
RAISERROR ('Updates on "Orders" table are not permitted.
Place a new order to add new products.', 16, 1);
# -
# +
## Identifying problematic triggers
-- Get the table ID
SELECT object_id AS TableID
FROM sys.objects
WHERE name = 'Orders';
_________________________________________
-- Get the trigger name
-- Fix: alias the trigger's NAME as TriggerName; the original selected
-- t.object_id (an int) under that alias.
SELECT t.name AS TriggerName
FROM sys.objects AS o
-- Join with the triggers table
INNER JOIN sys.triggers AS t ON t.parent_id = o.object_id
WHERE o.name = 'Orders';
_______________________________________
SELECT t.name AS TriggerName
FROM sys.objects AS o
INNER JOIN sys.triggers AS t ON t.parent_id = o.object_id
-- Get the trigger events
INNER JOIN sys.trigger_events AS te ON te.object_id = t.object_id
WHERE o.name = 'Orders'
-- Filter for triggers reacting to new rows
AND te.type_desc = 'INSERT';
______________________________________________
SELECT t.name AS TriggerName,
       -- Fix: OBJECT_DEFINITION takes the trigger's object_id (an int);
       -- the original passed parent_class_desc, a description string.
       OBJECT_DEFINITION(t.object_id) AS TriggerDefinition
FROM sys.objects AS o
INNER JOIN sys.triggers AS t ON t.parent_id = o.object_id
INNER JOIN sys.trigger_events AS te ON te.object_id = t.object_id
WHERE o.name = 'Orders'
AND te.type_desc = 'INSERT';
# -
| 17 - Building and Optimizing Triggers in SQL Server.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supplementary Material
#
# Additional material helpful during the demo.
# ## Explore Impact of Model Parameter Settings / Over- and Underfitting
#
# What happens when we adjust the model parameters?
# +
import inspect
from ipywidgets import interactive
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from util import DATA_PATH_PREPROCESSED, TARGET, FEATURES
# %matplotlib inline
sns.set_style('whitegrid')
# -
REGRESSORS = {
'DecisionTree': DecisionTreeRegressor,
'RandomForest': RandomForestRegressor,
}
def create_noisy_sine(n=100):
    """Generate a noisy sine-wave toy dataset for the regression demo.

    Returns
    -------
    tuple of np.ndarray
        (X_true, y_true, X, y): the noise-free reference curve on a uniform
        grid, plus n noisy samples at sorted random locations. Every 10th
        sample (starting at index 5) carries extra outlier noise.
    """
    rng = np.random.RandomState(123)  # fixed seed: reproducible demo data
    sample_x = np.sort(rng.rand(n))
    sample_y = np.sin(2 * np.pi * sample_x)
    sample_y = sample_y + rng.normal(0, 0.1, len(sample_y))
    # Inject outliers at every 10th point, starting at index 5.
    outlier_slice = sample_y[5::10]
    sample_y[5::10] = outlier_slice + 2 * (0.5 - rng.rand(len(outlier_slice)))
    grid_x = np.linspace(0, 1, n)
    grid_y = np.sin(2 * np.pi * grid_x)
    return grid_x, grid_y, sample_x, sample_y
# +
def regression(clf='DecisionTree', n_estimators=1, min_samples_leaf=1, max_depth=None):
    """Fit the chosen regressor to the noisy sine data and plot true vs. predicted.

    Intended to be driven by ipywidgets.interactive (see the cell below) to
    explore over- and underfitting.

    Parameters
    ----------
    clf : str
        Key into REGRESSORS ('DecisionTree' or 'RandomForest').
    n_estimators, min_samples_leaf, max_depth :
        Candidate hyperparameters; only those accepted by the chosen
        estimator's __init__ are actually passed through.
    """
    kws = {
        'max_depth': max_depth,
        'min_samples_leaf': min_samples_leaf,
        'n_estimators': n_estimators,
    }
    regr_cls = REGRESSORS[clf]
    # Keep only the keywords this estimator's __init__ actually accepts
    # (e.g. DecisionTreeRegressor has no n_estimators).
    supported_kws = {k: v for k, v in kws.items() if k in inspect.getfullargspec(regr_cls).args}
    regr = regr_cls(**supported_kws, random_state=42)
    X_true, y_true, X, y = create_noisy_sine(n=80)
    # sklearn expects a 2-D feature matrix, hence the (-1, 1) reshape.
    regr.fit(X.reshape((-1, 1)), y)
    yp = regr.predict(X.reshape((-1, 1)))
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.scatter(X, y, s=20, color='green', edgecolor='black', label='noisy data')
    ax.plot(X_true, y_true, label='true')
    ax.plot(X, yp, color='red', label='pred')
    ax.legend()
    ax.set_xlabel('x')
    ax.set_ylabel('y = sin(2pi x)')
    plt.show()
interactive_plot = interactive(
regression,
clf=['DecisionTree', 'RandomForest'],
max_depth=[1, 3, 5, 10, None],
n_estimators=[1, 2, 5, 10, 50, 100, 200, 500, 1000],
min_samples_leaf=[1, 3, 5, 10],
)
interactive_plot
# +
DATASETS = {
'moons': make_moons(noise=0.3, random_state=42),
'circles': make_circles(noise=0.2, factor=0.5, random_state=42),
'linear': make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=42, n_clusters_per_class=1, flip_y=0.1),
}
CLASSIFIERS = {
'DecisionTree': DecisionTreeClassifier,
'RandomForest': RandomForestClassifier,
}
# +
def classification(clf='DecisionTree', data='linear', max_depth=None, n_estimators=1, min_samples_leaf=1, max_features=1):
    """Fit the chosen classifier on a toy 2-D dataset and plot its decision surface.

    Intended to be driven by ipywidgets.interactive (see the cell below) to
    explore over- and underfitting.

    Parameters
    ----------
    clf : str
        Key into CLASSIFIERS ('DecisionTree' or 'RandomForest').
    data : str
        Key into DATASETS ('moons', 'circles' or 'linear').
    max_depth, n_estimators, min_samples_leaf, max_features :
        Candidate hyperparameters; only those accepted by the chosen
        estimator's __init__ are actually passed through.
    """
    kws = {
        'max_depth': max_depth,
        'min_samples_leaf': min_samples_leaf,
        'max_features': max_features,
        'n_estimators': n_estimators,
    }
    # prepare model
    clf_cls = CLASSIFIERS[clf]
    # Keep only the keywords this estimator's __init__ actually accepts.
    supported_kws = {k: v for k, v in kws.items() if k in inspect.getfullargspec(clf_cls).args}
    clf = clf_cls(**supported_kws, random_state=42)
    # prepare data
    X, y = DATASETS[data]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = .02  # mesh step size for the decision-surface grid
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # define color scheme
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    cm_bright_other = ListedColormap(['#FF8000', '#0080FF'])
    # fit predict
    clf.fit(X_train, y_train)
    # Probability of class 1 at every grid point, reshaped back onto the mesh.
    Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
    Z = Z.reshape(xx.shape)
    # plot
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.contourf(xx, yy, Z, cmap=cm, alpha=0.6)
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k', label='train')
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright_other,
               edgecolors='k', label='test')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    ax.legend()
    plt.show()
interactive_plot = interactive(
classification,
data=['moons', 'circles', 'linear'],
clf=['DecisionTree', 'RandomForest'],
max_depth=[1, 3, 5, None],
n_estimators=[1, 10, 100, 500],
min_samples_leaf=[1, 2, 5],
max_features=[1, 2],
)
interactive_plot
| supplement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# Dependencies and Setup
import pandas as pd
import os
from matplotlib import pyplot as plt
import numpy as np
# Files to Load
schools_data = "Resources/schools_complete.csv"
students_data = "Resources/students_complete.csv"
# +
# Read the schools data with pandas.
# Not every CSV requires an encoding, but these files are not plain UTF-8.
schools_data_df = pd.read_csv(schools_data, encoding="ISO-8859-1")
# display the contents of the data frame : schools_complete.csv
schools_data_df
# +
# Read the students data (same encoding, spelled consistently with the cell above)
students_data_df = pd.read_csv(students_data, encoding="ISO-8859-1")
# display the contents of the data frame : students_complete.csv
students_data_df
# +
# Combine the data into a single dataset: one row per student with the
# school's columns attached. (The duplicated key ['school_name','school_name']
# was redundant; a single key is equivalent.)
pycityschools_df = pd.merge(schools_data_df, students_data_df, how='left', on='school_name')
# display the contents of the data frame
pycityschools_df
# -
# Save the merged data frame to Output/pycityschools_combined.csv.
# BUGFIX: index/header expect booleans; the strings "true" only worked by
# being truthy.
pycityschools_df.to_csv('Output/pycityschools_combined.csv', encoding="utf-8", index=True, header=True)
# verify counts - clean the data
pycityschools_df.count()
# Display a statistical overview of the student data
students_data_df.describe()
# Display data types
pycityschools_df.dtypes
# Collecting a list of all columns within the DataFrame
pycityschools_df.columns
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Calculate the percentage of students who passed math **and** reading (% Overall Passing)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
# Calculate the number of unique schools in the DataFrame
school_total = len(pycityschools_df["school_name"].unique())
school_total
# Calculate the total number of students
student_total = pycityschools_df["student_name"].count()
student_total
# Calculate the total budget (summed from the schools table so each school's
# budget is counted once, not once per student as it would be in the merge)
budget_total = schools_data_df["budget"].sum()
budget_total
# Calculate the average math score
average_math_score = round(pycityschools_df["math_score"].mean(), 6)
average_math_score
# Calculate the average reading score
average_reading_score = round(pycityschools_df["reading_score"].mean(), 5)
average_reading_score
# Students with passing math (score of 70 or greater).
# BUGFIX: filter students_data_df with a mask built from itself. The previous
# code built the mask from pycityschools_df, whose merge reorders rows, so the
# mask selected the wrong students (counts matched, per-school groupings did not).
passing_math_df = students_data_df.loc[students_data_df['math_score'] >= 70, :]
passing_math_df.head()
# Calculate the percentage passing math - "% Passing Math"
students_passing_math = (passing_math_df['Student ID'].count() / student_total) * 100
students_passing_math_total = round(students_passing_math, 6)
students_passing_math_total
# Students with passing reading (score of 70 or greater)
passing_reading_df = students_data_df.loc[students_data_df['reading_score'] >= 70, :]
passing_reading_df.head()
# Calculate the percentage passing reading - "% Passing Reading"
students_passing_reading = (passing_reading_df["Student ID"].count() / student_total) * 100
students_passing_reading_total = round(students_passing_reading, 6)
students_passing_reading_total
# % Overall Passing: percentage of students who passed math AND reading.
# BUGFIX: the assignment asks for students passing both subjects; the previous
# code averaged the two pass rates instead.
passing_both_count = students_data_df[
    (students_data_df['math_score'] >= 70) & (students_data_df['reading_score'] >= 70)
]['student_name'].count()
percent_overall_passing = round((passing_both_count / student_total) * 100, 6)
percent_overall_passing
# Creating a summary DataFrame using the values above
district_summary_df = pd.DataFrame({"Total Schools": [school_total],
                                    "Total Students": [student_total],
                                    "Total Budget": [budget_total],
                                    "Average Math Score": [average_math_score],
                                    "Average Reading Score": [average_reading_score],
                                    "% Passing Math": [students_passing_math],
                                    "% Passing Reading": [students_passing_reading],
                                    "% Overall Passing Rate": [percent_overall_passing],
                                    })
district_summary_df
# +
# Use a Styler to format the budget column for display (the underlying
# DataFrame values are left numeric)
# district_summary_df=district_summary_df["Total Budget"].map("${:,}".format)
# district_summary_df.head()
district_summary_df.style.format({"Total Budget": "${:,.2f}"})
#"Average Reading Score": "{:.1f}",
#"Average Math Score": "{:.1f}",
#"% Passing Math": "{:.1%}",
#"% Passing Reading": "{:.0%}",
#"% Overall Passing Rate": "{:.1%}"})
# +
# # # Groupby students passing - get total students for each group and add total...
# # # percentage_passing_overall=["percentage_passing_math"+"Percentage_passing_reading"].sum()
# Group the passing-math students by their id (one group per student)
grouped_passing_math_df = passing_math_df.groupby(['Student ID'])
grouped_passing_math_df.head()
# -
# Group the passing-reading students by their id
grouped_passing_reading_df = passing_reading_df.groupby(['Student ID'])
grouped_passing_reading_df.head()
# +
# Work on a copy so the original merged frame keeps its integer ids
converted_pycityschools2_df = pycityschools_df.copy()
converted_pycityschools2_df['Student ID'] = converted_pycityschools2_df.loc[:, 'Student ID'].astype(float)
# display the contents of the data frame
converted_pycityschools2_df
# -
# DISTRICT SUMMARY #
district_summary_df
# ## School Summary
# * Create an overview table that summarizes key metrics about each school, including:
# * School Name
# * School Type
# * Total Students
# * Total School Budget
# * Per Student Budget
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * % Overall Passing (The percentage of students that passed math **and** reading.)
#
# * Create a dataframe to hold the above results
# Finding how many students each school has
# school_count = len(pycityschools_df["school_name"].unique())
pycityschools_df['school_name'].value_counts()
# Finding how many schools exist in the list
schools_unique_df = pycityschools_df["school_name"].unique()
schools_unique_df
# Finding the names of the students
students_unique_df = pycityschools_df['student_name'].unique()
students_unique_df
# Count how often each student name occurs (names are not unique ids)
# student_count = len(pycityschools_df["student_name"].unique())
student_counts = pycityschools_df["student_name"].value_counts()
student_counts
# Average math score per school
school_average_math_score = students_data_df.groupby('school_name')['math_score'].mean()
school_average_math_score
# Average reading score per school
school_average_reading_score = students_data_df.groupby('school_name')['reading_score'].mean()
school_average_reading_score
# Turn the per-school math Series into a mergeable DataFrame
school_average_math_score_df = pd.DataFrame({'school_name': school_average_math_score.index, 'school_average_math_score': school_average_math_score.values})
school_average_math_score_df
# Turn the per-school reading Series into a mergeable DataFrame
school_average_reading_score_df = pd.DataFrame({'school_name': school_average_reading_score.index, 'school_average_reading_score': school_average_reading_score.values})
school_average_reading_score_df
# +
# Count by school of students passing Math
school_count_passing_math = passing_math_df.groupby('school_name')['math_score'].count()
school_count_passing_math
school_count_passing_math_df = pd.DataFrame({'school_name': school_count_passing_math.index, 'school_count_passing_math': school_count_passing_math.values})
school_count_passing_math_df
# +
# Count by school of students passing Reading
school_count_passing_reading = passing_reading_df.groupby('school_name')['reading_score'].count()
school_count_passing_reading
# BUGFIX: the column previously copied school_count_passing_math.values, so
# every school's reading-pass count silently duplicated its math-pass count.
school_count_passing_reading_df = pd.DataFrame({'school_name': school_count_passing_reading.index, 'school_count_passing_reading': school_count_passing_reading.values})
school_count_passing_reading_df
# -
# Join Schools with their Average Math Score
schools_join_average_math_df = pd.merge(schools_data_df, school_average_math_score_df, on="school_name", how="outer")
schools_join_average_math_df.head()
# Join Schools with their Average Reading Score
schools_join_average_reading_df = pd.merge(schools_join_average_math_df, school_average_reading_score_df, on="school_name", how="outer")
schools_join_average_reading_df.head()
# Join Schools Count of Students Passing Math
schools_join_count_math_df = pd.merge(schools_join_average_reading_df, school_count_passing_math_df, on="school_name", how="outer")
schools_join_count_math_df.head()
# Join Schools Count of Students Passing Reading
schools_join_count_reading_df = pd.merge(schools_join_count_math_df, school_count_passing_reading_df, on="school_name", how="outer")
schools_join_count_reading_df.head()
# Naming Convention Change - rename merged dataset to a shorter handle.
# NOTE: this is an alias, not a copy — mutations below also affect
# schools_join_count_reading_df.
schools_merged_df = schools_join_count_reading_df
schools_merged_df
# By School Calculate Percent of Students Passing Math
# ("size" is the school's enrollment from schools_complete.csv)
schools_merged_df['percent_passing_math'] = (schools_merged_df["school_count_passing_math"] / schools_merged_df["size"]) * 100
schools_merged_df
# By School Calculate Percent of Students Passing Reading
schools_merged_df['percent_passing_reading'] = (schools_merged_df["school_count_passing_reading"] / schools_merged_df["size"]) * 100
schools_merged_df
# By School Calculate Overall Passing Rate (average of the two percentages).
# BUGFIX: the expression previously averaged percent_passing_math with itself.
schools_merged_df['overall_passing_rate'] = (schools_merged_df["percent_passing_math"] + schools_merged_df["percent_passing_reading"]) / 2
schools_merged_df.head()
# By School Calculate Per Student Budget
schools_merged_df['per_student_budget'] = (schools_merged_df["budget"] / schools_merged_df["size"])
schools_merged_df.head()
# Naming Convention Change - alias the merged dataset for the summary steps
schools_summary_raw_df = schools_merged_df
schools_summary_raw_df
# Put the columns into presentation order
schools_summary_reindexed_df = schools_summary_raw_df.reindex(columns=['School ID', 'school_name', 'type', 'size', 'budget', 'per_student_budget', 'school_average_math_score', 'school_average_reading_score', 'school_count_passing_math', 'school_count_passing_reading', 'percent_passing_math', 'percent_passing_reading', 'overall_passing_rate'])
schools_summary_reindexed_df
# Review data counts
schools_summary_reindexed_df.count()
# BUGFIX: schools_header_rename_df is not defined until the next cell, so
# inspecting it here raised NameError; inspect the reindexed frame instead.
schools_summary_reindexed_df.columns
# Rename columns in dataframe to presentation-friendly headers
schools_header_rename_df = schools_summary_reindexed_df.rename(columns={"school_name": "School Name",
                                                                        "type": "School Type",
                                                                        "size": "Total Students",
                                                                        "budget": "Total School Budget",
                                                                        "per_student_budget": "Per Student Budget",
                                                                        "school_average_math_score": "Average Math Score",
                                                                        "school_average_reading_score": "Average Reading Score",
                                                                        "school_count_passing_math": "Count Passing Math",
                                                                        "school_count_passing_reading": "Count Passing Reading",
                                                                        "percent_passing_math": "% Passing Math",
                                                                        "percent_passing_reading": "% Passing Reading",
                                                                        "overall_passing_rate": "% Overall Passing Rate"})
schools_header_rename_df.head()
# Reformat the budget columns as currency strings
# NOTE: after .map the two budget columns become strings, so they can no
# longer be used for numeric sorting or binning
schools_header_rename_df["Total School Budget"] = schools_header_rename_df["Total School Budget"].map("${:,.2f}".format)
schools_header_rename_df["Per Student Budget"] = schools_header_rename_df["Per Student Budget"].map("${:,.2f}".format)
schools_header_rename_df.head()
# Naming Convention Change - alias the renamed frame as the final summary
schools_summary_df = schools_header_rename_df
schools_summary_df.head()
# Create the Final Dataframe - presentation
schools_summary_df
# SCHOOL SUMMARY OUTPUT # (alphabetical by school name)
schools_summary_sorted_df = schools_summary_df.sort_values("School Name")
schools_summary_sorted_df
# ## Top Performing Schools (By % Overall Passing)
# * Sort and display the top five performing schools by % overall passing.
# Top Performing Schools By Pass Rate (column is still numeric; only the
# budget columns were string-formatted above)
top_performing_schools_df = schools_summary_df.sort_values("% Overall Passing Rate", ascending=False)
top_performing_schools_df
# Top Performing Schools By Pass Rate - TOP FIVE Output
top_performing_schools_new_index = top_performing_schools_df.reset_index(drop=True)
top_performing_schools_new_index.head()
# ## Bottom Performing Schools (By % Overall Passing)
# * Sort and display the five worst-performing schools by % overall passing.
# Bottom Performing Schools By Pass Rate
bottom_performing_schools_df = schools_summary_df.sort_values("% Overall Passing Rate", ascending=True)
bottom_performing_schools_df
# Bottom Performing Schools By Pass Rate - BOTTOM FIVE Output
bottom_performing_schools_new_index = bottom_performing_schools_df.reset_index(drop=True)
bottom_performing_schools_new_index.head()
# ## Math Scores by Grade
# * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# Math Scores By Grade
math_scores_by_grade = students_data_df[["school_name", "math_score", "grade"]]
math_scores_by_grade
# Math Scores By Grade Output.
# BUGFIX: math_scores_by_grade_df was displayed but never defined (all of its
# candidate definitions were commented out), raising NameError. Build it the
# same way as the reading pivot below: schools as rows, grades as columns.
math_scores_by_grade_df = math_scores_by_grade.pivot_table(index='school_name', columns='grade')
math_scores_by_grade_df
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
# Reading Scores By Grade
reading_scores_by_grade = students_data_df[["school_name", "reading_score", "grade"]]
reading_scores_by_grade
# Reading Scores By Grade Output: pivot to schools-as-rows, grades-as-columns
# (values default to the mean of reading_score)
reading_scores_by_grade_df = reading_scores_by_grade.pivot_table(index='school_name', columns='grade')
reading_scores_by_grade_df
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
# Work on a copy of the raw (numeric, snake_case) per-school summary
scores_by_school_spending = schools_summary_raw_df.copy()
scores_by_school_spending.head()
# Scores by School Spending - Create Bins over per-student budget
bins = [0, 585, 615, 645, 675]
group_labels = ["<$585", "$585-615", "$615-645", "$645-675"]
bins_school_spending = pd.cut(scores_by_school_spending["per_student_budget"], bins, labels=group_labels)
bins_school_spending = pd.DataFrame(bins_school_spending)
schools_summary_raw_df['Spending Level'] = bins_school_spending
# Average the score metrics within each spending bin.
# BUGFIX: the groupby was commented out, had a broken string literal, and
# referenced display-style column names that don't exist on
# schools_summary_raw_df; use the raw snake_case names instead.
scores_by_school_spending = schools_summary_raw_df.groupby(['Spending Level'])[
    ['school_average_math_score', 'school_average_reading_score',
     'percent_passing_math', 'percent_passing_reading', 'overall_passing_rate']
].mean()
scores_by_school_spending
# ## Scores by School Size
# * Perform the same operations as above, based on school size.
# Work on a copy of the raw per-school summary
scores_by_school_size = schools_summary_raw_df.copy()
scores_by_school_size.head()
# Create bins - edges and labels for school enrollment size
bins = [0, 1000, 2000, 5000]
size_labels = ['Small', 'Medium', 'Large']
# +
# Categorize each school by its enrollment size
bins_school_size = pd.cut(scores_by_school_size['size'], bins, labels=size_labels)
# Convert to a DataFrame so it can be assigned as a column
bins_school_size = pd.DataFrame(bins_school_size)
# add 'School Population' col
schools_summary_raw_df['School Population'] = bins_school_size
# +
# Show cols for bins_school_size to verify
# bins_school_size.columns
# -
# Average the score metrics within each size category.
# BUGFIX: the groupby referenced 'school name' (typo, KeyError) and
# display-style column names that don't exist on schools_summary_raw_df;
# group by the size category and select the raw snake_case columns
# (as a list, since tuple selection after groupby is invalid).
scores_by_school_size = schools_summary_raw_df.groupby(['School Population'])[
    ['school_average_math_score', 'school_average_reading_score',
     'percent_passing_math', 'percent_passing_reading', 'overall_passing_rate']
].mean()
scores_by_school_size
# ## Scores by School Type
# * Perform the same operations as above, based on school type
# Work on a copy of the raw per-school summary.
# BUGFIX: the copy was bound to scores_by_school_Type (capital T) while the
# next cell read scores_by_school_type, raising NameError; use one spelling.
scores_by_school_type = schools_summary_raw_df.copy()
scores_by_school_type.head()
# +
# Convert scores_by_school_type to df (no-op: it is already a DataFrame)
scores_by_school_type = pd.DataFrame(scores_by_school_type)
# Print scores_by_school_type cols to verify
# scores_by_school_type.columns
# -
# Average the score metrics per school type.
# BUGFIX: select the raw snake_case columns that actually exist on
# schools_summary_raw_df, as a list (tuple selection after groupby is invalid).
scores_by_school_type = schools_summary_raw_df.groupby(['type'])[
    ['school_average_math_score', 'school_average_reading_score',
     'percent_passing_math', 'percent_passing_reading', 'overall_passing_rate']
].mean()
scores_by_school_type.head()
| Versions/PyCitySchoolsmain-Gauer2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# # Azure Machine Learning / AutoML Integration
#
# Capture results of AutoML experiments on the dataset and persist to Data Lake for reporting and analysis.
# ## Library Imports
#
import azureml.core
from azureml.core import Workspace, Datastore, Dataset, Run
from azureml.core.run import Run
from azureml.core.experiment import Experiment
from azureml.core.model import Model
from azureml.interpret import ExplanationClient
from pyspark.sql.functions import *
import pprint
# ## Read Model Metrics from Azure ML
#
# Connect to the Azure ML workspace and extract metrics from the AutoML run.
# +
# connect to Azure ML
# NOTE(review): the workspace coordinates are intentionally blank in source
# control — fill them in before running.
subscription_id = ''
workspace_name = ''
resource_group = ''
ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
#pp = pprint.PrettyPrinter()
#pp.pprint(ws.get_details())
# -
# ### Pull Metrics from Run
#
# Gather the following metrics:
#
# * AUC = AUC_weighted
# * Accuracy = accuracy
# * Precision = precision_score_weighted
# * Recall = recall_score_weighted
# * F1 = f1_score_weighted
#
# +
# pull all metrics of best run
# NOTE(review): experiment_name and run_id are intentionally blank in source
# control — fill them in before running.
experiment_name = ''
run_id = ''
experiment = Experiment(workspace=ws, name=experiment_name)
fetched_run = Run(experiment, run_id)
metrics = fetched_run.get_metrics()
#pp = pprint.PrettyPrinter()
#pp.pprint(metrics)
# + diagram={"activateDiagramType": 1, "aggData": "{\"Value\":{\"AUC\":0.7530583094702326,\"Accuracy\":0.6831256729733887,\"F1\":0.681859512812026,\"Precision\":0.682347667116298,\"Recall\":0.6831256729733887}}", "chartConfig": {"aggByBackend": false, "aggregation": "SUM", "category": "bar", "keys": ["Metric"], "values": ["Value"], "xLabel": "Metric", "yLabel": "Value"}, "isSql": false, "isSummary": false, "previewData": {"filter": null}}
# select relevant metrics (weighted variants, per the markdown cell above)
auc = metrics.get('AUC_weighted')
accuracy = metrics.get('accuracy')
precision = metrics.get('precision_score_weighted')
recall = metrics.get('recall_score_weighted')
f1 = metrics.get('f1_score_weighted')
# combine into a single two-column Spark dataframe
# (`sc` is the SparkContext provided by the Synapse notebook runtime)
metrics_df = sc.parallelize([['AUC', auc], ['Accuracy', accuracy], ['Precision', precision], ['Recall', recall], ['F1', f1]]).toDF(('Metric', 'Value'))
#display(metrics_df)
# -
# ## Read Feature Importances from AutoML
#
# Download the engineered-feature explanation for the fetched run and extract
# the {feature_name: importance} mapping
client = ExplanationClient.from_run(fetched_run)
engineered_explanations = client.download_model_explanation(raw=False)
features_dict = engineered_explanations.get_feature_importance_dict()
# + diagram={"activateDiagramType": 1, "aggData": "{\"Value\":{\"avg_cart_abandon_rate_MeanImputer\":0.11945935832914051,\"avg_conversion_rate_per_user_per_month_MeanImputer\":0.20401384027776798,\"avg_order_value_per_user_per_month_MeanImputer\":0.41822444885119187,\"avg_session_duration_per_user_per_month_MeanImputer\":0.27078611121785473,\"brand_acer_purchased_binary_ModeCatImputer_LabelEncoder\":0.0175239679182456,\"brand_apple_purchased_binary_ModeCatImputer_LabelEncoder\":0.021093607022107382,\"brand_huawei_purchased_binary_ModeCatImputer_LabelEncoder\":0.0026359798356834024,\"brand_samsung_purchased_binary_ModeCatImputer_LabelEncoder\":0.008199318744508445,\"brand_xiaomi_purchased_binary_ModeCatImputer_LabelEncoder\":0.009319515351190353,\"product_id_1004767_purchased_binary_ModeCatImputer_LabelEncoder\":0.0006593092513365638,\"product_id_1004833_purchased_binary_ModeCatImputer_LabelEncoder\":0.0008278505122272443,\"product_id_1004856_purchased_binary_ModeCatImputer_LabelEncoder\":0.00024530348177285977,\"product_id_1005115_purchased_binary_ModeCatImputer_LabelEncoder\":0.006105109365293523,\"product_id_4804056_purchased_binary_ModeCatImputer_LabelEncoder\":0.0003915675165785674,\"sessions_per_user_per_month_MeanImputer\":0.40902442785462995,\"subcategory_audio_purchased_binary_ModeCatImputer_LabelEncoder\":0.0859806049597816,\"subcategory_clocks_purchased_binary_ModeCatImputer_LabelEncoder\":0.011233049590709289,\"subcategory_smartphone_purchased_binary_ModeCatImputer_LabelEncoder\":0.011650992130277638,\"subcategory_tablet_purchased_binary_ModeCatImputer_LabelEncoder\":0,\"subcategory_telephone_purchased_binary_ModeCatImputer_LabelEncoder\":0.00031129917547430985}}", "chartConfig": {"aggByBackend": false, "aggregation": "SUM", "category": "bar", "keys": ["Feature"], "values": ["Value"], "xLabel": "Feature", "yLabel": "Value"}, "isSql": false, "isSummary": false, "previewData": {"filter": null}}
# save to list and convert numpy types to native.
# BUGFIX: the dict keys are plain Python strings, so key.item() raised
# AttributeError; only the importance values need numpy -> native conversion
# (float() handles both numpy scalars and plain floats).
features_list = []
for feature_name, importance in features_dict.items():
    features_list.append([feature_name, float(importance)])
# save to dataframe (`spark` is the SparkSession provided by the Synapse runtime)
features_df = spark.createDataFrame(features_list, ['Feature', 'Value'])
#display(features_df)
# -
# ## Save Results to Data Lake
#
# Persist the model results to CSV files on the Data Lake for reporting.
#
# NOTE(review): the data-lake coordinates are intentionally blank in source
# control — fill them in before running.
data_lake_account_name = ''
file_system_name = ''
# Write each dataframe as a single CSV part file (coalesce(1)) so reporting
# tools can pick up one file per dataset; overwrite on each run
metrics_df.coalesce(1).write.option('header', 'true').mode('overwrite').csv(f'abfss://{file_system_name}@{data_lake_account_name}.dfs.core.windows.net/reporting/model_metrics')
features_df.coalesce(1).write.option('header', 'true').mode('overwrite').csv(f'abfss://{file_system_name}@{data_lake_account_name}.dfs.core.windows.net/reporting/feature_importances')
| Analytics_Deployment/synapse-workspace/notebooks/5 - Azure ML Integration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import laspy
import numpy as np
import scipy
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn import preprocessing
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import path
import seaborn as sns
sns.set()
# Load the LAZ point cloud; the context manager closes the file handle after
# the full point record is read into memory
with laspy.open("F:/Data/Lidar/1_20170601.laz") as lidar:
    las = lidar.read()
# +
#las.write("F:/Data/Lidar/new/sample.las")
# -
las.header.point_count
# Boolean masks over the ASPRS classification codes (1 = unclassified, 2 = ground)
las.classification == 1
las.classification == 2
las
# Keep only the unclassified points
unclassified = las[las.classification == 1]
# NOTE(review): despite the name, this slice keeps the last THREE quarters of
# the unclassified points (everything from index len/4 onward) — confirm intent
lastquarter = unclassified[int(len(unclassified)/4):]
len(lastquarter)
las.intensity
unclassified.intensity
len(unclassified)
len(las)
unclassified.header.point_count
# Keep only the ground points
ground = las[las.classification == 2]
ground.z
# Stack ground coordinates into an (n_points, 3) array
xyzdataset = np.vstack([ground.x, ground.y, ground.z]).transpose()
xyzdataset.shape
# Normalized copy of the ground xyz data (computed but not used below)
dataset_normal = preprocessing.normalize(xyzdataset)
# Build an (n_points, 4) feature array of x, y, z, intensity for clustering
ucx = np.array(unclassified.x)
ucy = np.array(unclassified.y)
ucz = np.array(unclassified.z)
uci = np.array(unclassified.intensity)
uc_array = np.vstack((ucx, ucy, ucz, uci)).T
uc_array.shape
# k-distance plot (sorted distance to the nearest neighbor) to help pick eps
nbr = NearestNeighbors(n_neighbors=2)
nbrs = nbr.fit(uc_array)
distances, indices = nbrs.kneighbors(uc_array)
distances = np.sort(distances, axis=0)
distances = distances[:, 1]
plt.plot(distances)
# Cluster the unclassified points; label -1 marks noise
uc_clusters = DBSCAN(eps=0.4, min_samples=15, leaf_size=10).fit(uc_array)
print("Number of clusters:" + str(len(np.unique(uc_clusters.labels_))))
print("Points clustered: " + str(len([i for i in uc_clusters.labels_ if i != -1])))
len(uc_clusters.labels_)
# Full-extent scatter colored by cluster label
plt.scatter(ucx, ucy, c=uc_clusters.labels_.astype(float))
# Zoomed-in window of the same scatter
plt.xlim(481300, 481400)
plt.ylim(5501050, 5501150)
plt.scatter(ucx, ucy, c=uc_clusters.labels_.astype(float))
plt.show()
# ## GROUND
# Same pipeline as above, applied to the ground-classified points
groundx = np.array(ground.x)
groundy = np.array(ground.y)
groundz = np.array(ground.z)
groundi = np.array(ground.intensity)
gnd_array = np.vstack((groundx, groundy, groundz, groundi)).T
gnd_array.shape
# k-distance plot to help pick eps for the ground points
nbr = NearestNeighbors(n_neighbors=2)
nbrs = nbr.fit(gnd_array)
distances, indices = nbrs.kneighbors(gnd_array)
distances = np.sort(distances, axis=0)
distances = distances[:, 1]
plt.plot(distances)
# Cluster the ground points (larger eps than the unclassified run)
gnd_clusters = DBSCAN(eps=2, min_samples=10).fit(gnd_array)
len(np.unique(gnd_clusters.labels_))
# Full-extent scatter colored by cluster label
plt.scatter(groundx, groundy, c=gnd_clusters.labels_.astype(float))
# Zoomed-in window of the same scatter
plt.xlim(481300, 481400)
plt.ylim(5501050, 5501150)
plt.scatter(groundx, groundy, c=gnd_clusters.labels_.astype(float))
plt.show()
| Test_Notebooks/CIND820 - initial testing code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
# NOTE(review): scipy.misc.imread/imsave were removed from modern SciPy; this
# checkpoint notebook predates that (imageio is the usual replacement).
frame1 = misc.imread('frame1.png')
frame3 = misc.imread('frame3.png')
plt.imshow(frame1)
plt.show()
# Synthesize an all-white middle frame with the same shape as frame1
frame2 = np.full(frame1.shape, 255, dtype=np.uint8)
plt.imshow(frame2)
plt.show()
misc.imsave("frame2.pgm",frame2)
# Stack the first channel of each frame into a volume; frame3 is included
# twice, giving 4 slices along axis 0, then two trailing singleton axes are added
arr = np.stack((frame1[:,:,0], frame2[:,:,0], frame3[:,:,0], frame3[:,:,0]))
arr = np.expand_dims(arr, axis=3)
arr = np.expand_dims(arr, axis=4)
# Python 2 print statement — this notebook targets a Python 2 kernel
print arr.shape
# NOTE(review): the next line looks garbled (likely two cells fused during a
# checkpoint save): presumably `frame1.shape` followed by
# `vigra.writeHDF5(arr, 'Raw.h5', 'exported_data')`. As written it raises
# AttributeError — fix before running.
frame1.shapevigra.writeHDF5(arr, 'Raw.h5', 'exported_data')
| tests/skipLinksTestDataset/.ipynb_checkpoints/create_images-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dev
# language: python
# name: dev
# ---
# # Finding the Best k
#
# In this activity, you’ll apply the elbow method to iteratively run the K-means algorithm and find the optimal number of clusters, or value for `k`.
#
# Instructions
#
# 1. Read in the `option_trades.csv` file from the Resources folder and create a DataFrame. Use the “date” column to create the DateTime Index. Be sure to include parameters for `parse_dates` and `infer_datetime_format`.
#
# > **Note** The option data that’s provided for this activity contains the prices of options, measured every four hours, on various stock options on the S&P 500. These stock options differ by various characteristics, including the time to expiration.
#
# 2. Create two lists: one for the range of lowercase-k values (from 1 to 11) to analyze and another to hold the list of inertia scores.
#
# 3. For each instance of k, define and fit a K-means model, and append the model’s inertia to the empty inertia list that you created in Step 2.
#
# 4. Store the values for lowercase-k and the inertia lists in a DataFrame called `df_elbow_data`.
#
# 5. Using hvPlot, plot the `df_elbow_data` DataFrame to visualize the elbow curve. Be sure to style and format your plot.
#
# 6. Answer the following question: Considering the plot, what’s the best number of clusters to choose, or value of k?
#
# References
#
# [scikit-learn Python Library](https://scikit-learn.org)
#
# [scikit-learn ML Algorithms](https://scikit-learn.org/stable/user_guide.html)
#
# [scikit-learn K-means](https://scikit-learn.org/stable/modules/clustering.html#k-means)
#
#
#
# Import the required libraries and dependencies
import pandas as pd
import hvplot.pandas
# BUGFIX: Path should come from the standard-library pathlib, not the
# third-party `path` (path.py) package; the usage below is identical.
from pathlib import Path
from sklearn.cluster import KMeans
import os
os.environ["OMP_NUM_THREADS"] = "1" # export OMP_NUM_THREADS=1
# ## Step 1: Read in the `option_trades.csv` file from the Resources folder and create a DataFrame. Use the “date” column to create the DateTime Index. Be sure to include parameters for `parse_dates` and `infer_datetime_format`.
# Read the CSV file into a Pandas DataFrame
# Use the date column to create the DateTime Index
df_options = pd.read_csv(
    Path("../Resources/option_trades.csv"),
    index_col='date',
    parse_dates=True,
    infer_datetime_format=True
)
# Review the first rows of the DataFrame
df_options.head()
# ## Step 2: Create two lists: one for the range of lowercase-k values (from 1 to 11) to analyze and another to hold the list of inertia scores.
# Candidate cluster counts for the elbow analysis: k = 1 .. 10
k = list(range(1, 11))
# ## Step 3: For each instance of k, define and fit a K-means model, and append the model’s inertia to the empty inertia list that you just created.
# +
# Fit one K-means model per candidate k (fixed random_state for
# reproducibility) and collect each fitted model's inertia score.
inertia = [
    KMeans(n_clusters=num_clusters, random_state=0).fit(df_options).inertia_
    for num_clusters in k
]
# Display the collected inertia scores
inertia
# -
# ## Step 4: Store the values for lowercase-k and the inertia lists in a DataFrame called `df_elbow_data`.
# +
# Create a dictionary with the data to plot the Elbow curve
elbow_data = {
'k': k,
'inertia': inertia
}
# YOUR CODE HERE
# +
# Create a DataFrame from the dictionary holding the values for k and inertia.
df_elbow_data = pd.DataFrame(elbow_data)
# YOUR CODE HERE
# -
# ## Step 5: Using hvPlot, plot the `df_elbow_data` DataFrame to visualize the elbow curve. Be sure to style and format your plot.
# Plot the elbow curve using hvPlot.
# YOUR CODE HERE
df_elbow_data.hvplot.line(
x='k',
y='inertia',
title='Elbow Curve',
xticks=k
)
# ## Step 6: Answer the following question:
# **Question** Considering the plot, what’s the best number of clusters to choose, or value of k?
#
# **Answer** # YOUR ANSWER HERE
from sklearn.preprocessing import StandardScaler
| from_bcs/01-Finding_the_Best_k/01-Finding_the_Best_k/Unsolved/finding_the_best_k.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from navitia_client import Client
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
import pandas as pd
import numpy as np
import copy
import json
# Widen pandas display limits so full API responses are visible in the notebook
pd.set_option('display.max_columns', 999)
pd.set_option('display.max_colwidth', 10000)
# give the TravelMyWay API key (redacted in source control)
client = Client(user='8<PASSWORD>-5<PASSWORD>-<PASSWORD>-9ff6-5<PASSWORD>5<PASSWORD>da6')
# +
def get_navitia_coverage():
    """Fetch every Navitia coverage region and attach a cleaned Polygon per region.

    Navitia splits the world into coverage regions (one for California, one
    for Paris-IDF, ...); we download the full list once so later calls can
    decide which region a scenario belongs to.
    """
    # one API round-trip for the complete region list
    raw_response = client.raw('coverage', multipage=False, page_limit=10, verbose=True)
    regions = raw_response.json()['regions']
    df_cov = pd.DataFrame.from_dict(regions)
    print(f'{df_cov.shape[0]} regions found, here is an example:\n {df_cov.sample()}')
    # attach a usable shapely Polygon (or None) per region
    df_cov['polygon_clean'] = df_cov.apply(clean_polygon_for_coverage, axis=1)
    return df_cov
def clean_polygon_for_coverage(x):
    """Parse a coverage row's raw "shape" string into a shapely Polygon.

    The API returns each region's outline as a multipolygon-like string (one or
    several '('-delimited rings). Most regions have a single ring, so we keep
    only the biggest ring and build a Polygon from it.

    Parameters
    ----------
    x : mapping / DataFrame row exposing a 'shape' key.

    Returns
    -------
    shapely Polygon, or None when the shape string is empty.
    """
    shape = x['shape']
    if shape == '':
        # No geometry provided for this region.
        return None
    # Split by '(' to separate the individual rings of the multipolygon.
    rings = shape.split('(')
    # BUGFIX: the original appended len(sizes_pol) (i.e. 0, 1, 2, ...) instead of
    # each ring's own length, so argmax always selected the *last* ring rather
    # than the biggest one. Measure each ring by its string length.
    sizes_pol = np.array([len(ring) for ring in rings])
    split_pol = rings[np.argmax(sizes_pol)]
    # Split the ring into its individual "lat lon" point strings.
    split_pol = split_pol.split(',')
    # The ring must be closed (first point == last point); overwriting the last
    # entry also discards any trailing ')' characters it carries.
    split_pol[-1] = split_pol[0]
    # Rebuild the latitude and longitude lists as numbers so shapely receives
    # numeric coordinates.
    lat = np.array([])
    long = np.array([])
    for point in split_pol:
        split_point = point.split(' ')
        lat = np.append(lat, float(split_point[0]))
        long = np.append(long, float(split_point[1]))
    # Shapely expects (x, y) == (longitude, latitude) pairs.
    return Polygon(np.column_stack((long, lat)))
def find_navita_coverage_for_points(point_from, point_to, df_cov):
    """Return the id of the coverage region containing both points.

    When either point falls outside every region (or the two points sit in
    different regions), the string 'no region found' is returned instead.
    """
    # Ignore regions whose shape could not be parsed into a Polygon.
    with_shape = df_cov[~pd.isna(df_cov.polygon_clean)]
    # A region qualifies only when it contains *both* endpoints.
    both_inside = with_shape.apply(
        lambda row: (row.polygon_clean.contains(point_from)) & (row.polygon_clean.contains(point_to)),
        axis=1,
    )
    matching_ids = with_shape[both_inside].id
    return matching_ids.values[0] if not matching_ids.empty else 'no region found'
# +
def section_json_summary(x):
    """Summarise one journey section as {"type": ..., "duration": ...}.

    The transport type comes from display_informations['physical_mode'] when
    the API provided it, falling back to the section's generic type otherwise.
    """
    info = x['display_informations']
    mode = x['type'] if pd.isna(info) else info['physical_mode']
    return {"type": mode, "duration": x['duration']}
def get_section_details(x):
    """List the {"type", "duration"} summary of every section of one journey.

    `x` is a journey row whose `sections` attribute holds the raw section
    records returned by the API; summaries keep the original section order.
    """
    sections = pd.DataFrame.from_dict(x.sections)
    return [section_json_summary(row) for _, row in sections.iterrows()]
def compute_journey(point_from, point_to):
    """Compute the detailed journeys between two points via the Navitia API.

    Steps:
    1. find the coverage region containing both points (uses module-level df_cov);
    2. call the journeys endpoint of that region;
    3. derive per-journey section details and total price.

    Returns a DataFrame of journeys (one row per proposed journey), or an
    error string when the points are not covered by the API.
    """
    coverage_region = find_navita_coverage_for_points(point_from, point_to, df_cov)
    if coverage_region == 'no region found':
        # Typo fixed in the error message ("voverage" -> "coverage").
        return 'the points are not within the coverage of navitia API'
    # Navitia expects lon;lat, hence the (y, x) order of the shapely Points.
    url = f'coverage/{coverage_region}/journeys?from={point_from.y};{point_from.x}&to={point_to.y};{point_to.x}'
    response = client.raw(url, multipage=False, page_limit=10, verbose=True)
    df_journey = pd.DataFrame.from_dict(response.json()['journeys'])
    print(f'{df_journey.shape[0]} journeys found')
    df_journey['section_details'] = df_journey.apply(get_section_details, axis=1)
    df_journey['price_total'] = df_journey.apply(lambda x: x.fare['total']['value'], axis = 1)
    # BUGFIX: the original returned df_journey[[]] — a zero-column frame — which
    # made the caller's column selection fail; return the full frame instead.
    return df_journey
# -
# Build the coverage table once; compute_journey() relies on this module-level df_cov.
df_cov = get_navitia_coverage()
# +
"""
To test the API, you can enter any point from and to and run the compute_journey function
"""
latitude_from = 48.88471
longitude_from = 2.370697
latitude_to = 48.78471
longitude_to = 2.470697
journeys = compute_journey(Point(latitude_from,longitude_from),Point(latitude_to,longitude_to))
# BUGFIX: `journeys.columns` was previously evaluated *before* `journeys` was
# assigned (stale notebook cell order), raising a NameError when the cells run
# top to bottom; inspect the columns only after the journeys are computed.
journeys.columns
journeys[['arrival_date_time', 'calendars', 'co2_emission', 'departure_date_time', 'distances', 'duration',
          'durations', 'fare', 'nb_transfers',
          'requested_date_time', 'status', 'tags', 'type', 'section_details', 'price_total']]
# -
# See the details of a given journey
journeys.section_details[0]
| test_apis/Test Navitia API.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Handle large corpus
# Cherche was designed primarily to create a neural search pipeline on a corpus of
# moderate size. A large corpus is a corpus where not all documents can be stored in memory and/or when the retrievers `retrieve.TfIdf`, `retrieve.BM25Okapi`, `retrieve.Lunr` are not fast enough. If you want to work with large corpora, consider looking at [Jina](https://github.com/jina-ai/jina). Nevertheless, Cherche is compatible with neural search on large corpora using Python's Elasticsearch client `retrieve.Elastic`. **In this tutorial we will use ElasticSearch to act as a retriever and to store the ranker embeddings.**
# Of course to establish the connection with Elasticsearch you need to have a server with Elasticsearch running or to run Elasticsearch on your local machine. The installation of Elasticsearch is explained [here](https://www.elastic.co/downloads/elasticsearch). The first step is to initialise the `retrieve.Elastic` retriever. `retrieve.Elastic` takes a parameter `es` that establishes the connection with Elasticsearch.
#
# Also to do neural search on a large corpus, you will need a GPU at least to pre-compute document embeddings. A GPU is not mandatory in a production environment if you don't want to do question answering or summarising.
# **In this tutorial, I will present two distinct solutions for implementing the neural search pipeline.**
#
# - **Scenario 1: Connecting remotely to ElasticSearch from the GPU computer to index documents and embeddings.**
# - **Scenario 2: Index documents and embeddings on Elasticsearch without a remote connection.**
# ### Scenario 1: Connecting remotely to ElasticSearch from the GPU computer to index documents and embeddings.
#
# We are on the computer that own a GPU here.
#
from cherche import retrieve, rank
from sentence_transformers import SentenceTransformer
from elasticsearch import Elasticsearch
# My Elasticsearch server runs locally on port 9200 on my computer. You should replace `localhost:9200` with your own Elasticsearch address if it's remote.
# +
es = Elasticsearch(hosts="localhost:9200")
if es.ping():
print("Elasticsearch is running.")
else:
print("Elasticsearch is not running.")
# -
# We declare our neural search pipeline, made of a retriever and a ranker
# +
# Elasticsearch-backed retriever: `key` is the unique id field, `on` the text
# field searched, `k` the number of candidate documents returned, and `index`
# the Elasticsearch index where the documents (and embeddings) are stored.
retriever = retrieve.Elastic(
    es = es,
    key = "id",
    on = "document",
    k = 100,
    index = "large_corpus"
)
# Dense ranker that re-orders the retriever's candidates using sentence
# embeddings; device="cuda" because this cell runs on the GPU machine.
ranker = rank.Encoder(
    key = "id",
    on = "document",
    encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2", device="cuda").encode,
    k = 10,
)
# -
# Now we will be able to index our documents and embeddings simultaneously. This process takes time if you have a lot of documents. You could run it in parallel from several computers for example.
# +
# Imagine 1 millions documents instead of 3. 😅
documents = [
{"id": 0, "document": "Toulouse is a municipality in south-west France. With 486,828 inhabitants as of 1 January 2018, Toulouse is the fourth most populous commune in France after Paris, Marseille and Lyon, having gained 101,000 inhabitants over the last 47 years (1968-2015)"},
{"id": 1, "document": "Montreal is the main city of Quebec. A large island metropolis and port on the St. Laurent River at the foot of the Lachine Rapids, it is the second most populous city in Canada, after Toronto."},
{"id": 2, "document": "Bordeaux is a French commune located in the Gironde department in the Nouvelle-Aquitaine region."}
]
retriever.add_embeddings(documents=documents, ranker=ranker)
# -
# Et voilà.
# ### Scenario 2: Index documents and embeddings on Elasticsearch without a remote connection.
#
# We are on the GPU computer
# +
import json
from cherche import rank
from sentence_transformers import SentenceTransformer
# -
ranker = rank.Encoder(
key = "id",
on = "document",
encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2", device="cuda").encode,
k = 10,
)
# On the GPU machine, we can compute document embeddings and save the embeddings as a json file or Pickle file for loading on the computer that has access to ElasticSearch.
# +
# Imagine 1 millions documents instead of 3. 😅
documents = [
{"id": 0, "document": "Toulouse is a municipality in south-west France. With 486,828 inhabitants as of 1 January 2018, Toulouse is the fourth most populous commune in France after Paris, Marseille and Lyon, having gained 101,000 inhabitants over the last 47 years (1968-2015)"},
{"id": 1, "document": "Montreal is the main city of Quebec. A large island metropolis and port on the St. Laurent River at the foot of the Lachine Rapids, it is the second most populous city in Canada, after Toronto."},
{"id": 2, "document": "Bordeaux is a French commune located in the Gironde department in the Nouvelle-Aquitaine region."}
]
for document, embedding in zip(documents, ranker.embs(documents=documents)):
# embeddings is important here, you should not change the key.
document["embedding"] = embedding.tolist()
# You can process the documents per batch and export them in differents json files.
with open("documents_embeddings.json", "w") as documents_embeddings:
json.dump(documents, documents_embeddings, indent = 4)
# -
# We are now on a computer that has access to the running Elasticsearch server. We have previously transferred the json file from the machine that has a GPU to the machine that has access to Elasticsearch.
# +
import json
from cherche import retrieve
from elasticsearch import Elasticsearch
# -
# My Elasticsearch server runs locally on port 9200 on my computer. You should replace `localhost:9200` with your own Elasticsearch address if it's remote.
# +
es = Elasticsearch(hosts="localhost:9200")
if es.ping():
print("Elasticsearch is running.")
else:
print("Elasticsearch is not running.")
# -
# We will be able to index the documents and embeddings that we have previously calculated.
# +
# BUGFIX: the original discarded the result of json.load(), so the `documents`
# indexed below were whatever was left in memory from an earlier cell, not the
# documents (with their precomputed "embedding" fields) read from the file.
with open("documents_embeddings.json", "r") as documents_embeddings:
    documents = json.load(documents_embeddings)
retriever = retrieve.Elastic(
    key = "id",
    on = "document",
    es = es,
    k = 100,
    index = "large_corpus"
)
# Index the documents together with their precomputed embeddings.
retriever.add(documents)
# -
# Et voila.
# You can now query your neural search pipeline via the `retrieve.Elastic` retriever without a GPU and have decent performance.
# +
from cherche import retrieve, rank
from sentence_transformers import SentenceTransformer
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts="localhost:9200")
retriever = retrieve.Elastic(
key = "id",
on = "document",
es = es,
k = 100,
index = "large_corpus"
)
ranker = rank.Encoder(
key = "id",
on = "document",
encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode,
k = 1,
)
search = retriever + ranker
# -
search("Toulouse")
# ### Time for a real demo on 600,000 documents - CPU.
# To make the demonstration more convincing, I indexed 600,000 wikipedia articles following scenario 2 with google collaboratory to calculate the embeddings and an Elasticsearch server running locally on my pc. Now we don't need a GPU anymore since we pre-computed embeddings.
# +
from cherche import retrieve, rank
from sentence_transformers import SentenceTransformer
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts="localhost:9200")
retriever = retrieve.Elastic(
key="id",
es = es,
on = "document",
k = 100,
index = "wiki" # My wiki index contains 700000 documents
)
ranker = rank.Encoder(
key="id",
encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode,
on = "document",
k = 10,
)
search = retriever + ranker
# -
# The neural search pipeline references 600,000 documents.
search
# On my computer, which only uses a CPU, it takes 100 ms to query all these documents, which is great.
# %timeit search("Toulouse")
search("Toulouse")
# Of course we can connect a question answering model or a summarization model to our neural pipeline. **However, these models are heavier and require a GPU to maintain the performance level.**
# +
from cherche import qa
from transformers import pipeline
search = retriever + ranker + qa.QA(
model = pipeline("question-answering", model = "deepset/roberta-base-squad2", tokenizer = "deepset/roberta-base-squad2"),
on = "document",
k = 2
)
# -
search("What is Python?")
# Summarization pipeline
# +
from cherche import summary
search = retriever + ranker + summary.Summary(
model = pipeline("summarization", model="sshleifer/distilbart-cnn-6-6", tokenizer="sshleifer/distilbart-cnn-6-6", framework="pt"),
on = "document",
)
# -
search("What is Python?")
| docs/examples/large_corpus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''Portfolio-Tracker'': pipenv)'
# metadata:
# interpreter:
# hash: 0600588c3b5f4418cbe7b5ebc6825b479f3bc010269d8b60d75058cdd010adfe
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
# Spotify client used to pull track metadata / audio features.
# NOTE(review): `spotipy` and `SpotifyClientCredentials` are never imported in
# this notebook — as written this cell raises a NameError; add
# `import spotipy` and `from spotipy.oauth2 import SpotifyClientCredentials`.
# NOTE(review): the client id/secret are hard-coded credentials committed to the
# notebook — move them to environment variables and rotate the secret.
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id="9e5f390cdd7e44658f17b1b3228c02b2",
                                                           client_secret="a2d7e42715d54e12a3a4e86ba5d93dc0"))
"""
Primary:
- id (Id of track generated by Spotify)
Numerical:
- acousticness (Ranges from 0 to 1)
- danceability (Ranges from 0 to 1)
- energy (Ranges from 0 to 1)
- duration_ms (Integer typically ranging from 200k to 300k)
- instrumentalness (Ranges from 0 to 1)
- valence (Ranges from 0 to 1)
- popularity (Ranges from 0 to 100)
- tempo (Float typically ranging from 50 to 150)
- liveness (Ranges from 0 to 1)
- loudness (Float typically ranging from -60 to 0)
- speechiness (Ranges from 0 to 1)
- year (Ranges from 1921 to 2020)
Dummy:
- mode (0 = Minor, 1 = Major)
- explicit (0 = No explicit content, 1 = Explicit content)
Categorical:
- key (All keys on octave encoded as values ranging from 0 to 11, starting on C as 0, C# as 1 and so on…)
- artists (List of artists mentioned)
- release_date (Date of release mostly in yyyy-mm-dd format, however precision of date may vary)
- name (Name of the song)"""
# reading csv
df = pd.read_csv('suggestor/edited_data_v2.csv')
df
# adding direct url to data set by adding url prefix and id
url = 'http://open.spotify.com/track/' + df['id']
df['url'] = url
# reordering columns, leaving out ID and release date
df = df[['combined', 'url', 'year', 'acousticness', 'danceability', 'duration_ms', 'energy',
         'explicit', 'instrumentalness', 'key', 'liveness', 'loudness',
         'mode', 'popularity', 'speechiness', 'tempo', 'valence']]
df
# finding a song I want to use as an input
tay = df[df['combined'] == 'Heather - CONAN GRAY'].sort_values('popularity', ascending=False)
tay
# +
# target set will be both artist and name
y_set = ['combined', 'url']
# droping target from data matrix
df_data = df.drop(y_set, axis=1)
# set target
df_target = df[y_set]
# -
# fit on data, 12 neighbors
# NOTE(review): leaf_size only affects tree-based algorithms and is ignored
# when algorithm='brute'. Features are also unscaled, so cosine similarity is
# dominated by large-magnitude columns (duration_ms, tempo) — confirm whether
# the "edited" CSV was already standardised.
nn = NearestNeighbors(algorithm='brute', metric='cosine', leaf_size =15, n_neighbors=12, n_jobs=-1)
nn.fit(df_data)
# +
# sample a song(index) from df_data to use as our query point
# NOTE(review): hard-coded row index — assumes the CSV row order is stable and
# that row 18676 is 'Heather - CONAN GRAY'; verify against the `tay` lookup above.
input_index = 18676 # 'Heather - CONAN GRAY
# vectorize
data_vect = [df_data.iloc[input_index].values]
data_vect
# -
# Query Using kneighbors
neigh_dist, neigh_indices = nn.kneighbors(data_vect)
# top 12 closest data vectors to our reference vector, data_vect
# (distances sorted ascending; the first hit is the query song itself)
neigh_dist
# these are the corresponding indicies of the most similar vectors
neigh_indices
# breaking down the array to a list
indexs = neigh_indices.flat[0:12].tolist()
indexs
# result metrics
df_data.iloc[indexs]
# result target
df_target.iloc[indexs]
| josh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import scipy.io
import numpy as np
import matplotlib.image as img
import matplotlib.pyplot as plt
dir = "./cifar"
filelist = ["data_batch_1.mat","data_batch_2.mat","data_batch_3.mat","data_batch_4.mat","data_batch_5.mat"]
width = 32 # 299
height = 32 # 299
color = 3
# -
def calculate(img, synapses):
    """Filter one flattened image through its best-matching synapse vector.

    Parameters
    ----------
    img : array of shape (1, 3072), raw pixel values in [0, 255].
    synapses : array of shape (n_hidden, 3072), learned weight vectors.

    Returns
    -------
    array of shape (1, 32, 32, 3): the normalised image multiplied elementwise
    by the synapse vector with the strongest activation.
    """
    # Normalise pixels to [0, 1].
    # (BUGFIX: removed a leftover debug `print(img)` that dumped the whole
    # 3072-value array on every call.)
    img = img/255.0
    trans_img = np.transpose(img)
    # Activation of every hidden unit; keep only the strongest one.
    m = np.argmax(np.dot(synapses, trans_img))
    # Elementwise modulation of the image by the winning weight vector, (1, 3072).
    output = img*synapses[m]
    # Back to image layout (1, 32, 32, 3) for plotting.
    output = get_3d_img(output)
    return output
# +
def get_flattend_img(img):
    """Flatten a (1, H, W, 3) image into channel-major (1, 3*H*W) layout.

    The output concatenates the flattened R, G and B planes (the CIFAR storage
    order), i.e. the inverse of get_3d_img().
    """
    # Generalisation: derive the spatial size from the image itself instead of
    # relying on the module-level `width`/`height` globals — identical result
    # for 32x32 input, but also correct for any other size.
    h, w = img.shape[1], img.shape[2]
    newImg = np.zeros((1, 3*w*h))
    plane = h*w
    for c in range(3):  # R, G, B planes in order
        newImg[0, c*plane:(c + 1)*plane] = img[0, :, :, c].flatten()
    return newImg
# -
def get_3d_img(img):
    """Reshape a flat channel-major (1, 3072) image into (1, 32, 32, 3)."""
    planes = np.reshape(img, (1, 3, 32, 32))  # (batch, channel, row, col)
    # Move the channel axis last for matplotlib's (row, col, channel) layout.
    return planes.transpose(0, 2, 3, 1)
# +
# Two trained synapse (weight) matrices to compare; each row is one hidden
# unit's 3072-dim weight vector.
synapses = np.load("synapse.npy")
synapses_3_7 = np.load("synapses_3_7.npy")
# img reading
# NOTE(review): `dir` shadows the builtin of the same name — consider renaming.
mat = scipy.io.loadmat(dir+'/'+'test_batch.mat')
test = mat['data']
# One CIFAR test image as a flat channel-major row vector (1, 3072).
img = test[1050, :].reshape(1,3072)
plt.subplot(311)
# Original image, normalised to [0, 1] for imshow.
ori_img = get_3d_img(img)/255.0
plt.imshow(ori_img[0])
# img = get_flattend_img(img_filtered)
print("calculating...v1")
img_filtered_v1 = calculate(img,synapses)
print("calculating...v2")
img_filtered_v2 = calculate(img,synapses_3_7)
plt.subplot(312)
plt.imshow(img_filtered_v1[0])
plt.subplot(313)
plt.imshow(img_filtered_v2[0])
# Mean absolute per-element difference between the original and each filtered
# version — lower means the filter preserved more of the image.
diff_v1 = np.sum(np.absolute(ori_img-img_filtered_v1))/3072
diff_v2 = np.sum(np.absolute(ori_img-img_filtered_v2))/3072
print("differ_v1:",diff_v1)
print("differ_v2:",diff_v2)
# -
# -
| compareImages.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md:myst
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (documentation_exercises)=
# # Exercises
#
# **After** completing the tutorial attempt the following exercises.
#
# **If you are not sure how to do something, have a look at the "How To" section.**
#
# Write documentation for the `statistics.py` file written in the exercises of
# [Modularisation Exercises](modularisation_exercises).
| book/building-tools/06-documentation/exercises/.main.md.bcp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jaanhavigautam666/Titanic-Data-ML-from-Disaster/blob/main/Data_Exploration.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="KKpqOQYGOUZA"
import numpy as np
import pandas as pd
# + id="pGdIt0C_Oxsb"
dataset_train = pd.read_csv('train.csv')
dataset_test = pd.read_csv('test.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="niVLUb3TxZQa" outputId="c49b7303-9743-4385-fd26-dd59b7f70ee5"
dataset_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="fUZ4CE0qxcvP" outputId="1d8b76df-1b33-4212-ae2e-f90ed97a8c67"
dataset_train.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="Jf1v9Dszxhwz" outputId="c6cace5a-ea10-47e6-c6b6-90600a96225f"
dataset_train.info()
# + colab={"base_uri": "https://localhost:8080/"} id="zIy3sum2xnK_" outputId="3b432bfa-d43f-4779-8d01-7deb20b13efe"
dataset_train.shape
# + id="r0q-8jUuzESO" colab={"base_uri": "https://localhost:8080/"} outputId="4baad677-363f-4d90-97fb-f3066d02c3a5"
dataset_train.Sex.value_counts()
# + id="cYv_SfmKoEKx"
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 411} id="altGMMw-t0Pe" outputId="6cd495da-e655-4a14-9f53-899012a4cbd6"
gender = ['male','female']
values = [577,314]
fig = plt.figure(figsize =(10, 7))
plt.pie(values, labels = gender)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="Is2Wm1BBvJll" outputId="4edc107d-265c-4c24-9a7c-cddab3343125"
dataset_train.loc[(dataset_train.Survived== 1)&(dataset_train.Sex == 'female')].shape
# + colab={"base_uri": "https://localhost:8080/"} id="EjVV2L807Xfu" outputId="26a0a9a0-660a-48df-e3af-26747748b7ec"
dataset_train.loc[(dataset_train.Survived== 0)&(dataset_train.Sex == 'female')].shape
# + colab={"base_uri": "https://localhost:8080/"} id="4uICvTiT968j" outputId="105daf15-886b-4007-c2a5-ec09053c98d7"
dataset_train.loc[(dataset_train.Survived== 1)&(dataset_train.Sex == 'male')].shape
# + colab={"base_uri": "https://localhost:8080/"} id="zy5cisWW-C2B" outputId="b6094cc9-54a0-4e8a-8f25-18efc58913f1"
dataset_train.loc[(dataset_train.Survived== 0)&(dataset_train.Sex == 'male')].shape
# + colab={"base_uri": "https://localhost:8080/"} id="u2SX7I5k-IkB" outputId="f6292147-d30a-40f6-a96b-b00b203d97dd"
percentage_male_survived = 109/(468+109)*100
percentage_male_survived
# + colab={"base_uri": "https://localhost:8080/"} id="cgLEj9Ov_cPv" outputId="a76759f1-d05d-4963-a4ad-694395c4660b"
percentage_male_dead = 468/(468+109)*100
percentage_male_dead
# + colab={"base_uri": "https://localhost:8080/"} id="RbJti4PUBlCE" outputId="4554f99d-d969-4aaa-b48d-7bf2e2c327d6"
percentage_female_survived = 233/(233+81)*100
percentage_female_survived
# + colab={"base_uri": "https://localhost:8080/"} id="fT1JCn7xCc3X" outputId="6776f5ab-6232-40ba-85bb-aba04b197616"
percentage_female_dead = 81/(233+81)*100
percentage_female_dead
# + colab={"base_uri": "https://localhost:8080/", "height": 400} id="dMR14V_cCrR5" outputId="a7daceb8-71ea-440f-c6e8-9012041e9834"
import seaborn as sns
# Countplot
sns.catplot(x ="Sex", hue ="Survived",
kind ="count", data = dataset_train)
# + [markdown] id="gFjpMjFKTVWv"
#
#
# * survival rate of men is around 20% and that of women is around 75%. Therefore, whether a passenger is a male or a female plays an important role in determining if one is going to survive.
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 337} id="tv9VCeUoPpa3" outputId="c624e5fa-6d4b-4c59-fc72-fdad17bb5449"
sns.heatmap(dataset_train.isnull(), cbar=False)
# + [markdown] id="cwEmCFy8SvN4"
#
#
# * Age column has missing values with variation in occurrence.
# * Cabin column are almost filled with missing values with variation in occurrence.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="JGrTdZsXKJCg" outputId="234e7180-e06a-47ee-c3f1-df7c7c4eea7c"
group = dataset_train.groupby(['Pclass', 'Survived'])
pclass_survived = group.size().unstack()
sns.heatmap(pclass_survived, annot = True, fmt ="d")
# + [markdown] id="nbLqy_sLVLF1"
#
#
# * Class 1 passengers have a higher survival chance compared to classes 2 and 3. It implies that Pclass contributes a lot to a passenger’s survival rate.
#
#
#
# + [markdown] id="n-D6yP3HZXMs"
# #Conclusion :
#
# * The columns that can be dropped are:
# PassengerId, Name, Ticket, Cabin: They are strings, cannot be categorized and don’t contribute much to the outcome.
# * Once the EDA is completed, the resultant dataset can be used for predictions.
| Data_Exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (Jupyter - cellregmap)
# language: python
# name: cellregmap_notebook
# ---
import scanpy as sc
import pandas as pd
import xarray as xr
from numpy import ones
from pandas_plink import read_plink1_bin
from numpy.linalg import cholesky
import matplotlib.pyplot as plt
import time
from limix.qc import quantile_gaussianize
import cellregmap
cellregmap
from cellregmap import run_interaction
chrom = 22
mydir = "/share/ScratchGeneral/anncuo/OneK1K/"
input_files_dir = "/share/ScratchGeneral/anncuo/OneK1K/input_files_CellRegMap/"
## sample mapping file
## this file will map cells to donors
## here, B cells only
sample_mapping_file = input_files_dir+"smf_Bcells.csv"
sample_mapping = pd.read_csv(sample_mapping_file, dtype={"individual_long": str, "genotype_individual_id": str, "phenotype_sample_id": str}, index_col=0)
sample_mapping.head()
## extract unique individuals
donors0 = sample_mapping["genotype_individual_id"].unique()
donors0.sort()
print("Number of unique donors: {}".format(len(donors0)))
# +
#### kinship file
# -
## read in GRM (genotype relationship matrix; kinship matrix)
kinship_file="/share/ScratchGeneral/anncuo/OneK1K/input_files_CellRegMap/grm_wide.csv"
K = pd.read_csv(kinship_file, index_col=0)
K.index = K.index.astype('str')
assert all(K.columns == K.index) #symmetric matrix, donors x donors
K = xr.DataArray(K.values, dims=["sample_0", "sample_1"], coords={"sample_0": K.columns, "sample_1": K.index})
K = K.sortby("sample_0").sortby("sample_1")
donors = sorted(set(list(K.sample_0.values)).intersection(donors0))
print("Number of donors after kinship intersection: {}".format(len(donors)))
## subset to relevant donors
K = K.sel(sample_0=donors, sample_1=donors)
assert all(K.sample_0 == donors)
assert all(K.sample_1 == donors)
plt.matshow(K)
## and decompose such as K = hK @ hK.T (using Cholesky decomposition)
hK = cholesky(K.values)
hK = xr.DataArray(hK, dims=["sample", "col"], coords={"sample": K.sample_0.values})
assert all(hK.sample.values == K.sample_0.values)
del K
print("Sample mapping number of rows BEFORE intersection: {}".format(sample_mapping.shape[0]))
## subsample sample mapping file to donors in the kinship matrix
sample_mapping = sample_mapping[sample_mapping["genotype_individual_id"].isin(donors)]
print("Sample mapping number of rows AFTER intersection: {}".format(sample_mapping.shape[0]))
## use sel from xarray to expand hK (using the sample mapping file)
hK_expanded = hK.sel(sample=sample_mapping["genotype_individual_id"].values)
assert all(hK_expanded.sample.values == sample_mapping["genotype_individual_id"].values)
hK_expanded.shape
# +
#### phenotype file
# -
# open anndata
my_file = "/share/ScratchGeneral/anncuo/OneK1K/expression_objects/sce"+str(chrom)+".h5ad"
adata = sc.read(my_file)
# sparse to dense
mat = adata.raw.X.todense()
# make pandas dataframe
mat_df = pd.DataFrame(data=mat.T, index=adata.raw.var.index, columns=adata.obs.index)
# turn into xr array
phenotype = xr.DataArray(mat_df.values, dims=["trait", "cell"], coords={"trait": mat_df.index.values, "cell": mat_df.columns.values})
phenotype = phenotype.sel(cell=sample_mapping["phenotype_sample_id"].values)
del mat
del mat_df
phenotype.shape
phenotype.head()
# +
#### genotype file
# -
## read in genotype file (plink format)
plink_folder = "/share/ScratchGeneral/anncuo/OneK1K/plink_files/"
plink_file = plink_folder+"plink_chr"+str(chrom)+".bed"
G = read_plink1_bin(plink_file)
G
G.shape
# +
# change this to select known eQTLs instead
# -
# Filter on specific gene-SNP pairs
# eQTL from B cells (B IN + B Mem)
Bcell_eqtl_file = input_files_dir+"fvf_Bcell_eqtls.csv"
Bcell_eqtl = pd.read_csv(Bcell_eqtl_file, index_col = 0)
Bcell_eqtl.head()
genes = Bcell_eqtl[Bcell_eqtl['chrom']==int(chrom)]['feature'].unique()
genes
# (1) gene name (feature_id)
gene_name = genes[1]
gene_name
# select SNPs for a given gene
leads = Bcell_eqtl[Bcell_eqtl['feature']==gene_name]['snp_id'].unique()
leads
#breakpoint()
G_sel = G[:,G['snp'].isin(leads)]
G_sel
# expand out genotypes from cells to donors (and select relevant donors in the same step)
G_expanded = G_sel.sel(sample=sample_mapping["individual_long"].values)
# assert all(hK_expanded.sample.values == G_expanded.sample.values)
G_expanded.shape
del G
# +
#### context file
# -
# cells (B cells only) by PCs
# C_file = input_files_dir+"PCs_Bcells.csv"
# C = pd.read_csv(C_file, index_col = 0)
C_file = input_files_dir+"PCs_Bcells.csv.pkl"
C = pd.read_pickle(C_file)
C = xr.DataArray(C.values, dims=["cell", "pc"], coords={"cell": C.index.values, "pc": C.columns.values})
C = C.sel(cell=sample_mapping["phenotype_sample_id"].values)
assert all(C.cell.values == sample_mapping["phenotype_sample_id"].values)
C.shape
C_gauss = quantile_gaussianize(C)
# select gene
y = phenotype.sel(trait=gene_name)
[(y == 0).astype(int).sum()/len(y)]
plt.hist(y)
plt.show()
y = quantile_gaussianize(y)
plt.hist(y)
plt.show()
n_cells = phenotype.shape[1]
W = ones((n_cells, 1))
del phenotype
start_time = time.time()
GG = G_expanded.values
print("--- %s seconds ---" % (time.time() - start_time))
# del G_expanded
del G_sel
# +
# myfolder = mydir+"CellRegMap_example_files/"
# import numpy as np
# np.save(myfolder+"pheno.npy",y)
# np.save(myfolder+"covs.npy",W)
# np.save(myfolder+"context.npy",C.values[:,0:10])
# np.save(myfolder+"geno.npy",GG[:,0:10])
# np.save(myfolder+"hK",hK_expanded)
# -
start_time = time.time()
pvals = run_interaction(y=y, W=W, E=C.values[:,0:10], G=GG, hK=hK_expanded)[0]
print("--- %s seconds ---" % (time.time() - start_time))
pv = pd.DataFrame({"chrom":G_expanded.chrom.values,
"pv":pvals,
"variant":G_expanded.snp.values})
pv.head()
# +
## took over an hour to run for one SNP!
# -
gene_name
folder = mydir + "CRM_interaction/Bcells_Bcell_eQTLs/"
outfilename = f"{folder}{gene_name}.tsv"
print(outfilename)
import os
# BUGFIX: the original printed "File already exists, exiting" but then wrote
# the file unconditionally anyway; skip the write when results for this gene
# already exist, as the message intends.
if os.path.exists(outfilename):
    print("File already exists, exiting")
else:
    pv.to_csv(outfilename)
| notebooks/test_CellRegMap_Bcells.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="qsthrSdtRNXL" outputId="cc140983-4a49-4bdb-b4ed-8625af3979e8"
from google.colab import drive
drive.mount('/content/gdrive')
# + colab={"base_uri": "https://localhost:8080/"} id="wbvMlHd_QwMG" outputId="18edf15e-dabe-43f3-cd60-616f7b0c1df5"
# !git clone https://github.com/ultralytics/yolov5 # clone repo
# !pip install -qr yolov5/requirements.txt # install dependencies (ignore errors)
# %cd yolov5
import torch
from IPython.display import Image, clear_output # to display images
from utils.google_utils import gdrive_download # to download models/datasets
clear_output()
print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))
# + colab={"base_uri": "https://localhost:8080/"} id="Knxi2ncxWffW" outputId="b021ec4b-0e8d-4ac1-a57f-4bbb61f1564e"
# Export code snippet and paste here
# %cd /content
# !curl -L "https://app.roboflow.com/ds/OvhylNHfiO?key=m3aBcpXy5E" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip
# + id="ZZ3DmmGQztJj"
# this is the YAML file Roboflow wrote for us that we're loading into this notebook with our data
# %cat data.yaml
# + id="dOPn9wjOAwwK"
# define number of classes based on YAML
import yaml
# Read the dataset config that Roboflow generated; 'nc' is the class count.
# Kept as a *string* because it is interpolated into the model-YAML template
# rendered by the %%writetemplate cell below.
with open("data.yaml", 'r') as stream:
    num_classes = str(yaml.safe_load(stream)['nc'])
# + id="1Rvt5wilnDyX"
#this is the model configuration we will use for our tutorial
# %cat /content/yolov5/models/yolov5s.yaml
# + id="t14hhyqdmw6O"
#customize iPython writefile so we can write variables
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def writetemplate(line, cell):
    """Cell magic: render the cell with str.format against globals() and write
    the result to the file named on the magic line — a %%writefile variant that
    can interpolate notebook variables such as {num_classes}."""
    rendered = cell.format(**globals())
    with open(line, 'w') as out:
        out.write(rendered)
# + id="uDxebz13RdRA"
# %%writetemplate /content/yolov5/models/custom_yolov5s.yaml
# parameters
nc: {num_classes} # number of classes
depth_multiple: 0.33 # model depth multiple
width_multiple: 0.50 # layer channel multiple
# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32
# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, BottleneckCSP, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, BottleneckCSP, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, BottleneckCSP, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, BottleneckCSP, [1024, False]], # 9
]
# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, BottleneckCSP, [512, False]], # 13
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small)
[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium)
[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large)
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]
# + [markdown] id="VUOiNLtMP5aG"
# # Train Custom YOLOv5 Detector
#
# ### Next, we'll fire off training!
#
#
# Here, we are able to pass a number of arguments:
# - **img:** define input image size
# - **batch:** determine batch size
# - **epochs:** define the number of training epochs. (Note: often, 3000+ are common here!)
# - **data:** set the path to our yaml file
# - **cfg:** specify our model configuration
# - **weights:** specify a custom path to weights. (Note: you can download weights from the Ultralytics Google Drive [folder](https://drive.google.com/open?id=1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J))
# - **name:** result names
# - **nosave:** only save the final checkpoint
# - **cache:** cache images for faster training
# + id="LKrUi9zb1GHu"
# %cd /content/yolov5/weights
# !gdown https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt
# + id="1NcFxRcFdJ_O"
# train yolov5s on custom data for 100 epochs
# time its performance
# %%time
# %cd /content/yolov5/
# !python train.py --img 704 --batch 32 --epochs 1000 --data '../data.yaml' --cfg ./models/custom_yolov5s.yaml --weights /content/yolov5/weights/yolov5s.pt --cache
# + id="mQLhApgsRVBq"
# %cp /content/yolov5/runs/exp0/weights/best.pt /content/gdrive/My\ Drive
# + [markdown] id="N3qM6T0W53gh"
# #Run Inference With Trained Weights
# Run inference with a pretrained checkpoint on contents of `test/images` folder downloaded from Roboflow.
# + id="wKa69z0kOHcS"
#test data
from IPython.display import clear_output
# %cd /content
# !gdown --id 1OUnWXuYLQao9AfviNSIDcwdqKR9Tzp7j
# !unzip test.zip; rm test.zip;
clear_output()
# + id="3gcPTBd4OQLX"
#traffic-names
# %cd /content/yolov5
# !gdown --id 1w7Zpw2hmr1p9VRMbBFfKH2D5BPRk04Jj
# + id="RjadLbrQO4BB"
#util.py
# %cd /content/yolov5/utils
# !gdown --id 1evRAmdo3LpKicWYjc59stlnLSilQJoia
# + id="9nmZZnWOgJ2S"
# when we ran this, we saw .007 second inference time. That is 140 FPS on a TESLA P100!
# use the best weights!
# %cd /content/yolov5/
# !python detect.py --weights /content/yolov5/runs/train/exp0/weights/best.pt --img 1024 --conf 0.4 --source /content/test
# + id="WslaF8IVKgPm"
# %cd /content/yolov5/
# !python test.py --weights /content/yolov5/runs/train/exp0/weights/best.pt --data /content/data.yaml --img 1024 --augment
# + id="dByKJEmGpyV7"
from models import *
from utils.datasets import *
from utils.utils import *
from utils.google_utils import *
def detect(save_img=False):
    """Run YOLOv5 inference over the images in ``opt.source``.

    All configuration comes from the module-level ``opt`` class (weights,
    image size, thresholds, output dir, ...). Annotated images and one
    ``.txt`` label file per image are written to ``opt.output``; returns a
    list of per-image dicts with ids, scores, classes and box corners.

    NOTE(review): ``save_img`` is unconditionally overwritten to True below,
    so the parameter currently has no effect.
    """
    imgsz = opt.img_size
    out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
    # Initialize
    device = torch_utils.select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    # Initialize model
    #model = Darknet(opt.cfg, imgsz)
    # Load weights
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        model = torch.load(weights, map_location=device)['model'].float()
        #model.load_state_dict(torch.load(weights, map_location=device)['model'])
    else:  # darknet format
        load_darknet_weights(model, weights)
    # Eval mode
    model.to(device).eval()
    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()
    # Set Dataloader
    vid_path, vid_writer = None, None
    save_img = True
    dataset = LoadImages(source, img_size=imgsz)
    # Get names and colors
    names = load_classes(opt.names)
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img.float()) if device.type != 'cpu' else None  # run once to warm up CUDA
    results=[]
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add the batch dimension
        # Inference
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        t2 = torch_utils.time_synchronized()
        # to float
        if half:
            pred = pred.float()
        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
                                   multi_label=False, classes=opt.classes, agnostic=opt.agnostic_nms)
        # Process detections
        for i, det in enumerate(pred):  # detections for image i
            p, s, im0 = path, '', im0s
            save_path = str(Path(out) / Path(p).name)
            #print(p)
            s += '%gx%g ' % img.shape[2:]  # print string
            #print(s)
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from imgsz to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                # Per-image accumulators for the returned result dict.
                # NOTE(review): these are only (re)bound when det is non-empty;
                # if the FIRST image has no detections, the `result` dict below
                # would raise NameError — verify inputs always yield detections.
                xmin = []
                ymin = []
                xmax = []
                ymax = []
                scores = []
                labels_value=[]
                image_ids=[]
                # Write results
                for *xyxy, conf, cls in det:
                    if save_txt:  # Write to file
                        conf_score = '%.2f' % (conf)
                        label_with_cls = '%s' % (names[int(cls)])
                        labels_value.append(label_with_cls)
                        xmin.append(int(xyxy[0]))
                        ymin.append(int(xyxy[1]))
                        xmax.append(int(xyxy[2]))
                        ymax.append(int(xyxy[3]))
                        scores.append(conf_score)
                        image_ids.append(save_path)
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        with open(save_path[:save_path.rfind('.')] + '.txt', 'a') as file:
                            file.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format
                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])
            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))
            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
            # Accumulate this image's detections for the caller.
            result = {
                'image_id': image_ids,
                'score': scores,
                'class': labels_value,
                'xmin': xmin,
                'ymin': ymin,
                'xmax': xmax,
                'ymax': ymax
            }
            results.append(result)
    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
    print('Done. (%.3fs)' % (time.time() - t0))
    return results
# + id="mx99eaSBp0zc"
class opt:
    """Namespace-style stand-in for the argparse options detect() expects."""
    cfg='/content/yolov5/models/yolov5x.yaml'  # model architecture yaml (only used for darknet weights)
    names='/content/yolov5/traffic.names'  # file listing class names, one per line
    weights='/content/yolov5/runs/train/exp0/weights/best.pt'  # trained checkpoint
    source='/content/test'  # directory of images to run inference on
    save_txt=True  # also write one .txt label file per image
    output='output'  # output directory (deleted and recreated on each run)
    classes=False  # class filter for NMS (False = keep all classes)
    img_size=1024  # inference image size in pixels
    conf_thres=0.3  # confidence threshold
    iou_thres=0.5  # IoU threshold for NMS
    fourcc='mp4v'  # video codec (unused for image sources)
    half=False  # FP16 inference (CUDA only)
    device=''  # CUDA device string ('' = auto-select)
    view_img=False  # show results via cv2.imshow
    agnostic_nms=False  # class-agnostic NMS
    augment=False  # augmented inference
# + id="-RuxQhdvp0t-"
# predict results
with torch.no_grad():
res=detect()
# + id="4LSBds_Zp7lu"
import pandas as pd
import numpy as np
# Collect the per-image detection dicts returned by detect() into one frame.
append_data = []
for i in range(len(res)):
    df = pd.DataFrame(res[i], columns=['image_id', 'class', 'score', 'xmin', 'ymin', 'xmax', 'ymax'])
    append_data.append(df)
finl_results = pd.concat(append_data)
# BUG FIX: str.strip('output/') strips any of the characters {o,u,t,p,/} from
# BOTH ends of the string (e.g. 'output/photo.jpg' -> 'hoto.jpg'), corrupting
# image ids; remove only the leading directory prefix instead.
finl_results.image_id = [x[len('output/'):] if x.startswith('output/') else x for x in finl_results.image_id]
# All inference ran at a fixed 1024x1024 resolution (opt.img_size).
finl_results['width'] = 1024
finl_results['height'] = 1024
# + id="ad3RPqXLqBZ3"
finl_results.head()
# + id="BGUQJQZBMvnh"
finl_results.shape
# + id="_GY82aOcqBV3"
finl_results.to_csv('submission.csv', index=False)
# + id="XuphXSTiMu4y"
# + id="1x_wg3VeiXMW"
# %cp /content/yolov5/submission.csv /content/gdrive/My\ Drive
| YOLOv5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py3.7
# language: python
# name: py3.7
# ---
# # Exercise 04
# ## Creating a simple model
# In this exercise we will create a simple logistic regression model from the scikit learn package.
# We will then create some model evaluation metrics and test the predictions against those model evaluation metrics.
# Let's load the feature data from the first exercise.
#
# We should always approach training any machine learning model training as an iterative approach, beginning first with a simple model, and using model evaluation metrics to evaluate the performance of the models.
import pandas as pd
# Features engineered in exercise 3 and the binary target from exercise 2.
feats = pd.read_csv('../data/OSI_feats_e3.csv')
target = pd.read_csv('../data/OSI_target_e2.csv')
# We first begin by creating a test and train dataset. We will train the data using the training dataset and evaluate the performance of the model on the test dataset. Later in the lesson we will add validation datasets that will help us tune the hyperparameters.
#
# We will use a test_size = 0.2 which means that 20% of the data will be reserved for testing
from sklearn.model_selection import train_test_split
# Hold out 20% of the rows for testing; fix the seed for reproducibility.
test_size = 0.2
random_state = 42
X_train, X_test, y_train, y_test = train_test_split(feats, target, test_size=test_size, random_state=random_state)
# Let's make sure our dimensions are correct
# X_* keep all feature columns; row counts follow the 80/20 split above.
print(f'Shape of X_train: {X_train.shape}')
print(f'Shape of y_train: {y_train.shape}')
print(f'Shape of X_test: {X_test.shape}')
print(f'Shape of y_test: {y_test.shape}')
# We fit our model first by instantiating it, then by fitting the model to the training data
from sklearn.linear_model import LogisticRegression
# max_iter is raised well above the default (100) so the solver converges.
model = LogisticRegression(random_state=42, max_iter=10000)
model.fit(X_train, y_train['Revenue'])
# To test the model performance we will predict the outcome on the test features (X_test), and compare those outcomes to real values (y_test)
y_pred = model.predict(X_test)
# Now let's compare against the true values. Let's start by using accuracy; accuracy is defined as the proportion of correct predictions out of the total predictions.
from sklearn import metrics
accuracy = metrics.accuracy_score(y_pred=y_pred, y_true=y_test)
print(f'Accuracy of the model is {accuracy*100:.4f}%')
# 87.0641% - that's not bad for a simple model with little feature engineering!
# ### Other evaluation metrics
#
# Other common metrics in classification models are precision, recall, and f1-score.
# Recall is defined as the proportion of correct positive predictions relative to total true positive values. Precision is defined as the proportion of correct positive predictions relative to total predicted positive values. F1 score is a combination of precision and recall, defined as 2 times the product of precision and recall, divided by the sum of the two.
#
# It's useful to look at evaluation metrics other than accuracy when the distribution of true and false values is imbalanced. We want these values to be as close to 1.0 as possible.
# average='binary' reports scores for the positive class only (support is discarded).
precision, recall, fscore, _ = metrics.precision_recall_fscore_support(y_pred=y_pred, y_true=y_test, average='binary')
print(f'Precision: {precision:.4f}\nRecall: {recall:.4f}\nfscore: {fscore:.4f}')
# We can see here that while the accuracy is high, the recall is much lower, which means that we're missing most of the true positive values.
# ### Feature importances
#
#
# We can look at which features are important by looking at the magnitude of the coefficients. Those with larger coefficients will have a greater contribution to the result. Those with a positive value will push the result toward the true outcome, that the customer will subscribe. Features with a negative coefficient will push the result toward a false outcome, that the customer will not subscribe to the product.
#
# As a note, since the features were not normalized (having the same scale), the values for these coefficients should serve only as a rough guide as to which features add predictive power.
# Pair each fitted coefficient with its feature name and sort so the most
# negative coefficients print first.
sorted_pairs = sorted(zip(model.coef_[0], X_train.columns.values.tolist()))
coef_list = [f'{feature}: {coef}' for coef, feature in sorted_pairs]
for item in coef_list:
    print(item)
# We can see from the coefficients that the traffic type is a key indicator, as well as which month the user browsed in.
| Exercise04/Exercise04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img src="images/dask_horizontal.svg" align="right" width="30%">
# # Distributed, Advanced
# ## Distributed futures
from dask.distributed import Client
# Start a local cluster with 4 worker processes and attach a client;
# displaying c.cluster shows the dashboard link and worker summary.
c = Client(n_workers=4)
c.cluster
# In the previous chapter, we showed that executing a calculation (created using delayed) with the distributed executor is identical to any other executor. However, we now have access to additional functionality, and control over what data is held in memory.
#
# To begin, the `futures` interface (derived from the built-in `concurrent.futures`) allows map-reduce like functionality. We can submit individual functions for evaluation with one set of inputs, or evaluated over a sequence of inputs with `submit()` and `map()`. Notice that the call returns immediately, giving one or more *futures*, whose status begins as "pending" and later becomes "finished". There is no blocking of the local Python session.
# Here is the simplest example of `submit` in action:
# +
def inc(x):
    """Return `x` incremented by one."""
    return 1 + x
fut = c.submit(inc, 1)
fut
# -
# We can re-execute the following cell as often as we want as a way to poll the status of the future. This could of course be done in a loop, pausing for a short time on each iteration. We could continue with our work, or view a progressbar of work still going on, or force a wait until the future is ready.
#
# In the meantime, the `status` dashboard (link above next to the Cluster widget) has gained a new element in the task stream, indicating that `inc()` has completed, and the progress section at the bottom shows one task complete and held in memory.
fut
# Possible alternatives you could investigate:
# ```python
# from dask.distributed import wait, progress
# progress(fut)
# ```
# would show a progress bar in *this* notebook, rather than having to go to the dashboard. This progress bar is also asynchronous, and doesn't block the execution of other code in the meanwhile.
#
# ```python
# wait(fut)
# ```
# would block and force the notebook to wait until the computation pointed to by `fut` was done. However, note that the result of `inc()` is sitting in the cluster, it would take **no time** to execute the computation now, because Dask notices that we are asking for the result of a computation it already knows about. More on this later.
# grab the information back - this blocks if fut is not ready
c.gather(fut)
# equivalent action when only considering a single future
# fut.result()
# Here we see an alternative way to execute work on the cluster: when you submit or map with the inputs as futures, the *computation moves to the data* rather than the other way around, and the client, in the local Python session, need never see the intermediate values. This is similar to building the graph using delayed, and indeed, delayed can be used in conjunction with futures. Here we use the delayed object `total` from before.
# +
# Some trivial work that takes time
# repeated from the Distributed chapter.
from dask import delayed
import time

def inc(x):
    """Increment x (artificially slow: 5 s)."""
    time.sleep(5)
    return x + 1

def dec(x):
    """Decrement x (artificially slow: 3 s)."""
    time.sleep(3)
    return x - 1

def add(x, y):
    """Add x and y (artificially slow: 7 s)."""
    time.sleep(7)
    return x + y

# Build the task graph lazily; nothing runs until compute()/gather().
x = delayed(inc)(1)
y = delayed(dec)(2)
total = delayed(add)(x, y)
# -
# notice the difference from total.compute()
# notice that this cell completes immediately
fut = c.compute(total)
fut
c.gather(fut) # waits until result is ready
# ### `Client.submit`
#
# `submit` takes a function and arguments, pushes these to the cluster, returning a *Future* representing the result to be computed. The function is passed to a worker process for evaluation. Note that this cell returns immediately, while computation may still be ongoing on the cluster.
fut = c.submit(inc, 1)
fut
# This looks a lot like doing `compute()`, above, except now we are passing the function and arguments directly to the cluster. To anyone used to `concurrent.futures`, this will look familiar. This new `fut` behaves the same way as the one above. Note that we have now over-written the previous definition of `fut`, which will get garbage-collected, and, as a result, that previous result is released by the cluster
#
# ### Exercise: Rebuild the above delayed computation using `Client.submit` instead
#
# The arguments passed to `submit` can be futures from other submit operations or delayed objects. The former, in particular, demonstrated the concept of *moving the computation to the data* which is one of the most powerful elements of programming with Dask.
#
# +
# Your code here
# + jupyter={"source_hidden": true}
x = c.submit(inc, 1)
y = c.submit(dec, 2)
total = c.submit(add, x, y)
print(total) # This is still a future
c.gather(total) # This blocks until the computation has finished
# -
# Each futures represents a result held, or being evaluated by the cluster. Thus we can control caching of intermediate values - when a future is no longer referenced, its value is forgotten. In the solution, above, futures are held for each of the function calls. These results would not need to be re-evaluated if we chose to submit more work that needed them.
#
# We can explicitly pass data from our local session into the cluster using `scatter()`, but usually better is to construct functions that do the loading of data within the workers themselves, so that there is no need to serialise and communicate the data. Most of the loading functions within Dask, such as `dd.read_csv`, work this way. Similarly, we normally don't want to `gather()` results that are too big in memory.
#
# The [full API](http://distributed.readthedocs.io/en/latest/api.html) of the distributed scheduler gives details of interacting with the cluster, which remember, can be on your local machine or possibly on a massive computational resource.
# The futures API offers a work submission style that can easily emulate the map/reduce paradigm (see `c.map()`) that may be familiar to many people. The intermediate results, represented by futures, can be passed to new tasks without having to pull them locally from the cluster, and new work can be assigned to work on the output of previous jobs that haven't even begun yet.
#
# Generally, any Dask operation that is executed using `.compute()` can be submitted for asynchronous execution using `c.compute()` instead, and this applies to all collections. Here is an example with the calculation previously seen in the Bag chapter. We have replaced the `.compute()` method there with the distributed client version, so, again, we could continue to submit more work (perhaps based on the result of the calculation), or, in the next cell, follow the progress of the computation. A similar progress-bar appears in the monitoring UI page.
# %run prep.py -d accounts
# +
import dask.bag as db
import os
import json
# Lazily read the gzipped JSON account records and compute, on the cluster,
# the mean transaction amount over records named 'Alice'.
filename = os.path.join('data', 'accounts.*.json.gz')
lines = db.read_text(filename)
js = lines.map(json.loads)
# c.compute() returns a future immediately; the work runs asynchronously.
f = c.compute(js.filter(lambda record: record['name'] == 'Alice')
              .pluck('transactions')
              .flatten()
              .pluck('amount')
              .mean())
# -
from dask.distributed import progress
# note that progress must be the last line of a cell
# in order to show up
progress(f)
# get result.
c.gather(f)
# release values by deleting the futures
del f, fut, x, y, total
# ### Persist
# Considering which data should be loaded by the workers, as opposed to passed, and which intermediate values to persist in worker memory, will in many cases determine the computation efficiency of a process.
#
# In the example here, we repeat a calculation from the Array chapter - notice that each call to `compute()` is roughly the same speed, because the loading of the data is included every time.
# %run prep.py -d random
# +
import h5py
import os
# Open the HDF5 dataset created by prep.py and wrap it in a chunked dask array.
f = h5py.File(os.path.join('data', 'random.hdf5'), mode='r')
dset = f['/x']
import dask.array as da
x = da.from_array(dset, chunks=(1000000,))
# Each compute() re-reads the data from disk, so both runs take similar time.
# %time x.sum().compute()
# %time x.sum().compute()
# -
# If, instead, we persist the data to RAM up front (this takes a few seconds to complete - we could `wait()` on this process), then further computations will be much faster.
# changes x from a set of delayed prescriptions
# to a set of futures pointing to data in RAM
# See this on the UI dashboard.
x = c.persist(x)
# %time x.sum().compute()
# %time x.sum().compute()
# Naturally, persisting every intermediate along the way is a bad idea, because this will tend to fill up all available RAM and make the whole system slow (or break!). The ideal persist point is often at the end of a set of data cleaning steps, when the data is in a form which will get queried often.
# **Exercise**: how is the memory associated with `x` released, once we know we are done with it?
# ## Asynchronous computation
# <img style="float: right;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/32/Rosenbrock_function.svg/450px-Rosenbrock_function.svg.png" height=200 width=200>
#
# One benefit of using the futures API is that you can have dynamic computations that adjust as things progress. Here we implement a simple naive search by looping through results as they come in, and submit new points to compute as others are still running.
#
# Watching the [diagnostics dashboard](../../9002/status) as this runs you can see computations are being concurrently run while more are being submitted. This flexibility can be useful for parallel algorithms that require some level of synchronization.
#
# Lets perform a very simple minimization using dynamic programming. The function of interest is known as Rosenbrock:
# +
# a simple function with interesting minima
import time
def rosenbrock(point):
    """Evaluate the Rosenbrock function at `point`; return (point, score)."""
    time.sleep(0.1)  # simulate an expensive objective evaluation
    a = 1 - point[0]
    b = point[1] - point[0] ** 2
    return point, a ** 2 + 2 * b ** 2
# -
# Initial setup, including creating a graphical figure. We use Bokeh for this, which allows for dynamic update of the figure as results come in.
# +
from bokeh.io import output_notebook, push_notebook
from bokeh.models.sources import ColumnDataSource
from bokeh.plotting import figure, show
import numpy as np
output_notebook()

# set up plot background: log-scaled Rosenbrock surface over [-5, 5] x [-5, 5]
N = 500
x = np.linspace(-5, 5, N)
y = np.linspace(-5, 5, N)
xx, yy = np.meshgrid(x, y)
d = (1 - xx)**2 + 2 * (yy - xx**2)**2
d = np.log(d)  # log scale so the shallow valley is visible

p = figure(x_range=(-5, 5), y_range=(-5, 5))
p.image(image=[d], x=-5, y=-5, dw=10, dh=10, palette="Spectral11");
# -
# We start off with a point at (0, 0), and randomly scatter test points around it. Each evaluation takes ~100ms, and as result come in, we test to see if we have a new best point, and choose random points around that new best point, as the search box shrinks.
#
# We print the function value and current best location each time we have a new best value.
# +
from dask.distributed import as_completed
from random import uniform

scale = 5                  # initial random perturbation scale
best_point = (0, 0)        # initial guess
best_score = float('inf')  # best score so far
startx = [uniform(-scale, scale) for _ in range(10)]
starty = [uniform(-scale, scale) for _ in range(10)]

# set up plot
source = ColumnDataSource({'x': startx, 'y': starty, 'c': ['grey'] * 10})
p.circle(source=source, x='x', y='y', color='c')
t = show(p, notebook_handle=True)

# initial 10 random points
futures = [c.submit(rosenbrock, (x, y)) for x, y in zip(startx, starty)]
iterator = as_completed(futures)  # yields futures in completion order

for res in iterator:
    # take a completed point, is it an improvement?
    point, score = res.result()
    if score < best_score:
        best_score, best_point = score, point
        print(score, point)

    # sample a new candidate near the current best
    x, y = best_point
    newx, newy = (x + uniform(-scale, scale), y + uniform(-scale, scale))

    # update plot (rollover keeps only the 20 most recent points)
    source.stream({'x': [newx], 'y': [newy], 'c': ['grey']}, rollover=20)
    push_notebook(document=t)

    # add new point, dynamically, to work on the cluster
    new_point = c.submit(rosenbrock, (newx, newy))
    iterator.add(new_point)  # Start tracking new task as well

    # Narrow search and consider stopping
    scale *= 0.99
    if scale < 0.001:
        break
point
# -
# ## Debugging
# When something goes wrong in a distributed job, it is hard to figure out what the problem was and what to do about it. When a task raises an exception, the exception will show up when that result, or other results that depend upon it, is gathered.
#
# Consider the following delayed calculation to be computed by the cluster. As usual, we get back a future, which the cluster is working on to compute (this happens very slowly for the trivial procedure).
# +
@delayed
def ratio(a, b):
    """Integer-divide a by b (lazily); raises ZeroDivisionError when b == 0."""
    return a // b

# Sum the three ratios on the cluster; f is a future, not a value.
ina = [5, 25, 30]
inb = [5, 5, 6]
out = delayed(sum)([ratio(a, b) for (a, b) in zip(ina, inb)])
f = c.compute(out)
f
# -
# We only get to know what happened when we gather the result (this is also true for `out.compute()`, except we could not have done other stuff in the meantime). For the first set of inputs, it works fine.
c.gather(f)
# But if we introduce bad input, an exception is raised. The exception happens in `ratio`, but only comes to our attention when calculating the sum.
# + tags=["raises-exception"]
ina = [5, 25, 30]
inb = [5, 0, 6]
out = delayed(sum)([ratio(a, b) for (a, b) in zip(ina, inb)])
f = c.compute(out)
c.gather(f)
# -
# The display in this case makes the origin of the exception obvious, but this is not always the case. How should this be debugged, how would we go about finding out the exact conditions that caused the exception?
#
# The first step, of course, is to write well-tested code which makes appropriate assertions about its input and clear warnings and error messages when something goes wrong. This applies to all code.
#
# The most typical thing to do is to execute some portion of the computation in the local thread, so that we can run the Python debugger and query the state of things at the time that the exception happened. Obviously, this cannot be performed on the whole data-set when dealing with Big Data on a cluster, but a suitable sample will probably do even then.
# + tags=["raises-exception"]
import dask
with dask.config.set(scheduler="sync"):
# do NOT use c.compute(out) here - we specifically do not
# want the distributed scheduler
out.compute()
# +
# uncomment to enter post-mortem debugger
# # %debug
# -
# The trouble with this approach is that Dask is meant for the execution of large datasets/computations - you probably can't simply run the whole thing
# in one local thread, else you wouldn't have used Dask in the first place. So the code above should only be used on a small part of the data that also exhibits the error.
# Furthermore, the method will not work when you are dealing with futures (such as `f`, above, or after persisting) instead of delayed-based computations.
#
# As an alternative, you can ask the scheduler to analyze your calculation and find the specific sub-task responsible for the error, and pull only it and its dependencies locally for execution.
# + tags=["raises-exception"]
c.recreate_error_locally(f)
# +
# uncomment to enter post-mortem debugger
# # %debug
# -
# Finally, there are errors other than exceptions, when we need to look at the state of the scheduler/workers. In the standard "LocalCluster" we started, we
# have direct access to these.
[(k, v.state) for k, v in c.cluster.scheduler.tasks.items() if v.exception is not None]
| Dask/06_distributed_advanced.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
from aocutils.maze import dijkstra
from aocutils.grid import arr_neighbors
import numpy as np
def inc(grid, i):
    """Return a copy of `grid` increased by `i`, wrapping values above 9 back to 1.

    Implements the Advent of Code day-15 risk wrap (9 -> 1, not 0), applied
    one step at a time so every intermediate overflow wraps correctly.
    """
    bumped = grid.copy()
    for _ in range(i):
        bumped = bumped + 1
        bumped[bumped > 9] = 1
    return bumped
# generating the grid: the full map is the input tiled 5x5, each tile
# incremented by its (row shift + column shift), wrapping values above 9 to 1
grid = np.genfromtxt('input.txt', delimiter=1)
hor = np.hstack([inc(grid,i) for i in range(5)])
grid = np.vstack([inc(hor,i) for i in range(5)])

# making a dict of points with their neighbors and corresponding weights
# (edge weight = risk value of the neighbor cell being entered)
neigh = arr_neighbors(grid)
neigh = {pnt: [(n,grid[n]) for n in neighborset] for pnt, neighborset in neigh.items()}

# dijkstra: lowest-total-risk path from top-left to bottom-right corner
dest = len(grid) -1, len(grid[0]) -1
path, length = dijkstra(neigh, (0,0), dest)
length
# -
| advent_of_code_2021/day 15 dijkstra maze/solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tidal Consituent Extraction Tutorial
# ## Introduction
#
# This tutorial demonstrates the use of the `harmonica` Python library to extract tidal constituent data. `harmonica` can provide the amplitude, phase, and speed of specified constituents at specified point locations. Various nodal factors of a constituent can also be obtained at a specified time. The resolution of the datasets and the supported constituents vary depending on the tidal model used. `harmonica` currently supports the TPXO (v7-9), ADCIRC (v2015), LeProvost, and FES2014 tidal models. Resources for TPXO, ADCIRC, and LeProvost are automatically retrieved if not found (requires Internet connection). Due to licensing restrictions, the FES2014 data files must already exist locally.
# ## Example - Simple Points
#
# The following example extracts tidal components of two constituents at two ocean locations. Extracting nodal factors of the constituents at a specified time is also shown.
# To begin, import the necessary modules.
import datetime
from harmonica.tidal_constituents import Constituents
# Create a list of the constituents of interest. See the classes in `resource.py` for a listing of the models' supported constituents and their expected codes. Universally compatible constituents are used here.
# M2 and S2 are supported by every model this tutorial exercises.
constituents = ['M2', 'S2']
# Next, construct the tidal constituent extractor object. The TPXO8 model is specified at construction here. This argument is optional and defaults to the TPXO9 model.
extractor = Constituents(model='tpxo8')
# The next section of code demonstrates using the interface to get amplitudes, frequencies, speeds, earth tidal reduction factors, equilibrium arguments, and nodal factors for specified constituents at a specified time. The astronomical equations used to calculate these variables do not depend on the tidal model being used, so the values are always the same regardless of the extractor's current tidal model.
# +
# Get astronomical nodal factor data (not dependent on the tidal model)
# Nodal factors are purely astronomical, so the result depends only on the
# timestamp -- not on which tidal model the extractor is configured with.
when = datetime.datetime(2018, 8, 30, 15)
nodal_factors = extractor.get_nodal_factor(names=constituents, timestamp=when)
print(nodal_factors.to_string())
# Expected output:
# amplitude frequency speed earth_tide_reduction_factor equilibrium_argument nodal_factor
# M2 0.242334 0.000141 28.984104 0.693 345.201515 1.087974
# S2 0.112841 0.000145 30.000000 0.693 90.000000 1.000000
# -
# No external resources have been required up to this point. The remainder of the tutorial uses the various tidal models to extract consituent data at point locations. If the required resources do not exist locally at this time, `harmonica` will attempt to download the files from the Internet (with the exception of the FES2014 model). `__init__.py` contains a `config` dict with the default resource locations. Set the `pre_existing_data_dir` variable to change the directory `harmonica` searches for existing resources. Set the `data_dir` variable to change the directory `harmonica` saves downloaded resources. Resource files are expected to be in subfolders with the name of the model. If a required resource exists in either directory, it will not be downloaded.
# Create a list of point locations where tidal harmonic components will be extracted. The first location is in the Northwest Atlantic Ocean, and the second point is in the Northeast Pacific Ocean. Locations should be specified as tuples of latitude and longitude degrees. Latitude coordinates should be in the \[-90.0, 90.0\] range. Longitude coordinates should in either the \[0.0, 360.0\] or \[-180.0, 180.0\] range.
# Sample points, given as (lat, lon) -- not (x, y).  Longitudes may use
# either the [0, 360] or the [-180, 180] convention.
locations = [
    (39.74, 285.93),   # NW Atlantic; equivalently (39.74, -74.07)
    (46.18, -124.38),  # NE Pacific; equivalently (46.18, 235.62)
]
# The next lines of code extract constituent amplitude, phase, and speed using the TPXO8 model specified at construction. A `Pandas.DataFrame` is returned for each of the requested locations. Speeds of phase change published by NOAA (https://tidesandcurrents.noaa.gov) are provided for convenience but are spatially constant. To ease comparison with other model results in this tutorial, the `positive_ph` argument has been set to `True`. This ensures all ouput phases are positive.
# +
# Extract amplitude/phase/speed with the TPXO8 model chosen at construction.
# positive_ph=True normalises every output phase to be positive so the
# numbers are easy to compare with the other models below.
tpxo_comps = extractor.get_components(locations, constituents, positive_ph=True)
for point, table in zip(locations, tpxo_comps.data):
    print(point)
    print('%s\n' % table.sort_index().to_string())
# Expected output:
# (39.74, 285.93)
# amplitude phase speed
# M2 0.560701 352.265551 28.984104
# S2 0.106576 18.615143 30.000000
#
# (46.18, -124.38)
# amplitude phase speed
# M2 0.910663 234.117743 28.984104
# S2 0.253107 260.181916 30.000000
# -
# The tidal model can be switched with every call to `get_components` by specifying the `model` argument. Switch to the ADCIRC v2015 model and compare the results.
# +
# Same extraction, overriding the model per-call with ADCIRC v2015.
adcirc_comps = extractor.get_components(locations, constituents, model='adcirc2015')
for where, result in zip(locations, adcirc_comps.data):
    print(where)
    print(result.sort_index().to_string() + '\n')
# Expected output:
# (39.74, -74.07)
# amplitude phase speed
# M2 0.554417 352.729485 28.984104
# S2 0.106317 16.530658 30.000000
#
# (46.18, -124.38)
# amplitude phase speed
# M2 0.944207 230.498719 28.984104
# S2 0.264533 256.892259 30.000000
# -
# Repeat the extraction using the LeProvost model.
# +
# Repeat the extraction with the LeProvost model.
leprovost_comps = extractor.get_components(locations, constituents, model='leprovost')
for location, comp_table in zip(locations, leprovost_comps.data):
    print(location)
    print(comp_table.sort_index().to_string() + '\n')
# Expected output:
# (39.74, -74.07)
# amplitude phase speed
# M2 0.589858 353.748502 28.984104
# S2 0.083580 20.950538 30.000000
#
# (46.18, -124.38)
# amplitude phase speed
# M2 0.858488 232.029922 28.984104
# S2 0.242000 258.787649 30.000000
# -
# To use the FES2014 model, either change the `__init__.config` variables or copy the data files to one of the appropriate locations. Files should be in a `fes2014` subfolder of the referenced directory. See the class `FES2014Resources` in `resource.py` for a listing of expected filenames. Only the height NetCDF datasets are required.
# +
# FES2014 requires the height NetCDF datasets to exist locally (no download).
fes2014_comps = extractor.get_components(locations, constituents, model='fes2014')
for loc, frame in zip(locations, fes2014_comps.data):
    print(loc)
    print('%s\n' % frame.sort_index().to_string())
# Expected output:
# (39.74, 285.93)
# amplitude phase speed
# M2 0.574708 354.674980 28.984104
# S2 0.111234 15.286627 30.000000
#
# (46.18, 235.62)
# amplitude phase speed
# M2 0.903397 229.333500 28.984104
# S2 0.249951 255.765785 30.000000
# -
| tutorials/python_api/TidalConstituentExtractionTutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Install dependencies into the running kernel.  A bare ``pip install`` line
# is a shell command, not Python -- it raises a SyntaxError when the cell is
# executed.  The supported way from a notebook is the ``%pip`` line magic
# (written commented-out here, as jupytext does for magics in .py form).
# %pip install pandas
# %pip install numpy
import pandas as pd
import numpy as np

# Zillow Observed Rent Index, all homes plus multifamily, smoothed and
# seasonally adjusted (SSA), one row per ZIP code.
zillow = pd.read_csv("Datasets/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv")
zillow.head()        # peek at the first rows
zillow.isna().sum()  # missing-value count per column
zillow.shape         # (rows, columns)
| Datasets/Data/Zillow_RentalData/zillow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>GetsDrawn DotCom</h1>
# This is a python script to generate the website GetsDrawn. It takes data from /r/RedditGetsDrawn and makes something awesome.
#
# The script has evolved and been rewritten several times.
#
# The first script for rgdsnatch was written after I got banned from posting my artwork on /r/RedditGetsDrawn. The plan was to create a new site that displayed stuff from /r/RedditGetsDrawn.
#
# Currently it gets the most recent 25 items on redditgetsdrawn, and saves it to a folder. The script looks at the newest 25 reference photos on RedditGetsDrawn. It focuses only on jpeg/png images and ignores and links to none .jpg or .png ending files.
# It is needed to instead of ignoring them files - get the image or images in some cases, from the link.
# The photos are always submitted from imgur.
# Still filter out the i.imgur files, but take the links and filter them through a python imgur module returning the .jpeg or .png files.
#
#
# This is moving forward from rgdsnatch.py because I am stuck on it.
#
# TODO
#
# Fix the links that don't link to png/jpeg and link to webaddress.
# Needs to get the images that are at that web address and embed them.
#
# Display artwork submitted under the images.
#
# Upload artwork to user. Sends them a message on redditgetsdrawn with links.
#
# More pandas
#
# Saves reference images to imgs/year/month/day/reference/username-reference.png
#
# Saves art images to imgs/year/month/day/art/username-line-bw-colour.png
#
# Creates index.html file with:
# Title of site and logo: GetsDrawn
# Last updated date and time.
#
# Path of image file /imgs/year/month/day/username-reference.png.
# (This needs changed to just their username).
#
# Save off .meta data from reddit of each photo, saving it to reference folder.
# username-yrmnthday.meta - contains info such as author, title, upvotes, downvotes.
# Currently saving .meta files to a meta folder - along side art and reference.
#
# Folder sorting system of files.
# websitename/index.html-style.css-imgs/YEAR(15)-MONTH(2)-DAY(4)/art-reference-meta
# Inside art folder
# Currently it generates USERNAME-line/bw/colour.png 50/50 white files. Maybe should be getting art replies from reddit?
#
# Inside reference folder
# Reference fold is working decent.
# it creates USERNAME-reference.png / jpeg files.
#
# Currently saves username-line-bw-colour.png to imgs folder. Instead get it to save to imgs/year/month/day/usernames.png.
# Script checks the year/month/day and if folder isnt created, it creates it. If folder is there, exit.
# Maybe get the reference image and save it with the line/bw/color.pngs
#
# The script now filters the jpeg and png image and skips links to imgur pages. This needs to be fixed by getting the images from the imgur pages.
# It renames the image files to the redditor username followed by a -reference tag (and ending with png of course).
# It opens these files up with PIL and checks the sizes.
# It needs to resize the images that are larger than 800px to 800px.
# These images need to be linked in the index.html instead of the imgur altenatives.
#
# Instead of the jpeg/png files on imgur they are downloaded to the server with this script.
#
# Filter through as images are getting downloaded and if it has been less than certain time or if the image has been submitted before
#
# Extending the subreddits it gets data from to cycle though a list, run script though list of subreddits.
#
# Browse certain days - Current day by default but option to scroll through other days.
#
# Filters - male/female/animals/couples etc
# Function that returns only male portraits.
# tags to add to photos.
# Filter images with tags
#
#
#
import os
import requests
from bs4 import BeautifulSoup
import re
import json
import time
import praw
import dominate
from dominate.tags import *
from time import gmtime, strftime
#import nose
#import unittest
import numpy as np
import pandas as pd
from pandas import *
from PIL import Image
from pprint import pprint
#import pyttsx
import shutil
import getpass
import random
from TwitterFollowBot import TwitterBot
# Twitter bot used at the very end of the script to announce the update.
my_bot = TwitterBot()
hosnam = getpass.getuser()
# Site root lives in the current user's home directory.
gtsdrndir = ('/home/' + hosnam + '/getsdrawndotcom/')
gtsdrndir
# Create the site root on first run, then work from inside it.
if os.path.isdir(gtsdrndir) == True:
    print ('its true')
else:
    print ('its false')
    os.mkdir(gtsdrndir)
os.chdir(gtsdrndir)
# Reddit API client; user_agent identifies this script to the API.
r = praw.Reddit(user_agent='getsdrawndotcom')
# +
#getmin = r.get_redditor('itwillbemine')
# +
#mincom = getmin.get_comments()
# +
#engine = pyttsx.init()
#engine.say('The quick brown fox jumped over the lazy dog.')
#engine.runAndWait()
# +
#shtweet = []
# +
#for mi in mincom:
# print mi
# shtweet.append(mi)
# -
# bodycom is rendered into the generated page later via p(bodycom).
bodycom = []
# NOTE(review): bodyicv is never populated or read in the visible script.
bodyicv = dict()
# +
#beginz = pyttsx.init()
# +
#for shtz in shtweet:
# print shtz.downs
# print shtz.ups
# print shtz.body
# print shtz.replies
#beginz.say(shtz.author)
#beginz.say(shtz.body)
#beginz.runAndWait()
# bodycom.append(shtz.body)
#bodyic
# +
#bodycom
# -
# Fetch the newest submissions from /r/redditgetsdrawn and materialise the
# lazy listing into a plain list.
getnewr = r.get_subreddit('redditgetsdrawn')
rdnew = getnewr.get_new()
lisrgc = []   # newest submissions
lisauth = []  # author names; filled later when .meta files are written
for uz in rdnew:
    #print uz
    lisrgc.append(uz)
gtdrndic = dict()
# Image tree lives under <site root>/imgs.
imgdir = (gtsdrndir + 'imgs')
imgdir
if os.path.isdir(imgdir) == True:
    print ('its true')
else:
    print ('its false')
    os.mkdir(imgdir)
artlist = os.listdir(imgdir)
from time import time
# NOTE(review): this import shadows the `time` module imported at the top of
# the file, and the `time` function is never called below -- consider removing.
# +
# Date components used to build the imgs/<yy>/<mm>/<dd>/ folder names (UTC).
yearz = strftime("%y", gmtime())
monthz = strftime("%m", gmtime())
dayz = strftime("%d", gmtime())
#strftime("%y %m %d", gmtime())
# +
# Per-day layout: imgs/yy/mm/dd/{reference,art,meta}
yrzpat = (imgdir + '/' + yearz)
monzpath = (yrzpat + '/' + monthz)
dayzpath = (monzpath + '/' + dayz)
rmgzdays = (dayzpath + '/reference')
imgzdays = (dayzpath + '/art')
metzdays = (dayzpath + '/meta')
# Same day path again with a trailing slash (equivalent to dayzpath + '/').
repathz = (imgdir + '/' + yearz + '/' + monthz + '/' + dayz + '/')
# -
repathz
dayzpath
imgzdays
repathz
def ospacheck():
    """Ensure the current year's image directory (<imgdir>/<yearz>) exists.

    Bug fix: the original concatenated ``imgdir + yearz`` without a path
    separator, so it tested and created e.g. ``.../imgs15`` instead of
    ``.../imgs/15`` -- every other path in this script joins with '/'
    (see ``yrzpat`` above), and this mismatch is why directories ended up
    in the wrong place.
    """
    yeardir = os.path.join(imgdir, yearz)
    if os.path.isdir(yeardir):
        print ('its true')
    else:
        print ('its false')
        os.mkdir(yeardir)
ospacheck()
# +
#if os.path.isdir(imgzdir + yearz) == True:
#    print 'its true'
#else:
#    print 'its false'
#    os.mkdir(imgzdir + yearz)
# -
# NOTE(review): this holds variable-name *strings*, not paths; the loop that
# consumed it is commented out further down.
lizmon = ['monzpath', 'dayzpath', 'imgzdays', 'rmgzdays', 'metzdays']
# Something is wrong with the script and it's no longer creating these dir in the correct folder. How did this break?
# Fixed that but problems with it
# Getting error:
# OSError: [Errno 17] File exists: '/home/wcmckee/getsdrawndotcom/imgs/15/01'
# If the file exists it should be skipping over it, thats why it has the os.path.isdir == True:
# print its true
# else
# print its false, and make the dir
# Report whether the month folder exists (its creation line was already
# commented out by the author; this check only prints).
if os.path.isdir(monzpath) == True:
    print ('its true')
else:
    print ('its false')
    #os.mkdir('/home/wcmckee/getsdrawndotcom/' + monzpath)
# +
# Create the day folder and its art/reference/meta children.  Bug fix: the
# original created the children *before* the parent day folder existed, so
# os.mkdir raised OSError on the first run of a new day.  os.makedirs builds
# any missing parents, and iterating parent-first keeps the prints sensible.
for zdir in (dayzpath, imgzdays, rmgzdays, metzdays):
    if os.path.isdir(zdir):
        print ('its true')
    else:
        print ('its false')
        os.makedirs(zdir)
# -
# Need to fix dir to just have /imgs/15/02/reference/imgnam-reference.jpg
monzpath
# +
# Web-relative path (no leading slash) to today's reference images; used to
# build the tweet URL and the <img> tags in the generated page below.
iwcpath = 'imgs/' + yearz + '/' + monthz + '/' + dayz + '/reference'
#monzpath = (yrzpat + '/' + monthz)
#dayzpath = (monzpath + '/' + dayz)
#rmgzdays = (dayzpath + '/reference')
# -
#for liz in lizmon:
# if os.path.isdir(liz) == True:
## print 'its true'
# else:
# print 'its false'
# os.mkdir(liz)
# NOTE(review): hard-coded 'wcmckee' home path, unlike the getpass-derived
# gtsdrndir above -- breaks for any other user.  Unused in the visible script.
fullhom = ('/home/wcmckee/getsdrawndotcom/')
# +
#artlist
# -
httpad = ('http://getsdrawn.com/imgs')
# +
#im = Image.new("RGB", (512, 512), "white")
#im.save(file + ".thumbnail", "JPEG")
# -
# Recomputed identically to the earlier definitions; harmless duplication.
rmgzdays = (dayzpath + '/reference')
imgzdays = (dayzpath + '/art')
metzdays = (dayzpath + '/meta')
# Work from the meta folder while writing the .meta files below.
os.chdir(metzdays)
metadict = dict()
# if i save the data to the file how am i going to get it to update as the post is archieved. Such as up and down votes.
rgde = len(lisrgc)
rgde
alrgds = dict()
# +
#for lisr in lisrgc:
#    print(lisr.author)
#    print(lisr.title[0:30])
# -
# NOTE(review): the same four keys are overwritten on every pass, so after
# this loop metadict holds only the *last* submission's values.
for lisz in lisrgc:
    metadict.update({'up': lisz.ups})
    metadict.update({'down': lisz.downs})
    metadict.update({'title': lisz.title})
    metadict.update({'created': lisz.created})
    #metadict.update({'createdutc': lisz.created_utc})
    #print lisz.ups
    #print lisz.downs
    #print lisz.created
    #print lisz.comments
import random  # NOTE(review): already imported at the top of the file
# Pick one random submission to advertise on Twitter.
ranchor = random.choice(lisrgc)
titshort = ranchor.title[0:30]  # keep the tweet short
titsre =titshort.replace(' ', '')
titsre
ranchor.url
ranautr = (ranchor.author)
hasra = ('#') + str(ranautr)  # hashtag for the submitter's username
hasra
hasgd = ('#getsdrawn')
# Public URL the reference image will have once the site is synced.
urlfin = ('http://getsdrawn.com/' + iwcpath + '/' + str(ranautr) + '-reference.png')
(urlfin)
# Final tweet text: truncated title, image URL, author hashtag, site hashtag.
twez = (titsre + ' ' + urlfin + ' ' + hasra + ' ' + hasgd)
len(twez)
# Need to save json object.
#
# Dict is created but it isnt saving. Looping through lisrgc twice, should only require the one loop.
#
# Cycle through lisr and append to dict/concert to json, and also cycle through lisr.author meta folders saving the json that was created.
# Write one <author>.meta file (ASCII-folded submission title) per post, and
# collect titles/authors along the way.  Bug fix: the original nested the
# write inside ``for osliz in os.listdir(metzdays)``, which rewrote the same
# file once per existing directory entry and -- worse -- wrote nothing at all
# when the meta folder was empty (the author's own TODO above notes the
# redundant loop).
for lisr in lisrgc:
    gtdrndic.update({'title': lisr.title})
    lisauth.append(str(lisr.author))
    # Fold the title to plain ASCII so the .meta file is safely encodable.
    rstrin = lisr.title.encode('ascii', 'ignore').decode('ascii')
    with open(str(lisr.author) + '.meta', "w") as f:
        f.write(rstrin)
# +
#matdict
# -
# I have it creating a meta folder and creating/writing username.meta files. It wrote 'test' in each folder, but now it writes the photo author title of post.. the username/image data. It should be writing more than author title - maybe upvotes/downvotes, subreddit, time published etc.
#
# +
#os.listdir(dayzpath)
# -
# Instead of creating these white images, why not download the art replies of the reference photo.
# +
#for lisa in lisauth:
# #print lisa + '-line.png'
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-line.png')
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-bw.png')
#print lisa + '-bw.png'
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-colour.png')
#print lisa + '-colour.png'
# +
#lisauth
# -
# I want to save the list of usernames that submit images as png files in a dir.
# Currently when I call the list of authors it returns Redditor(user_name='theusername'). I want to return 'theusername'.
# Once this is resolved I can add '-line.png' '-bw.png' '-colour.png' to each folder.
# +
#lisr.author
# -
# NOTE(review): declared but never filled in the visible script.
namlis = []
# +
#opsinz = open('/home/wcmckee/visignsys/index.meta', 'r')
#panz = opsinz.read()
# -
# Work from the reference-images folder for the downloads below.
os.chdir(rmgzdays)
# Filter the non jpeg/png links. Need to perform request or imgur api to get the jpeg/png files from the link. Hey maybe bs4?
# +
#from imgurpython import ImgurClient
# +
#opps = open('/home/wcmckee/ps.txt', 'r')
#opzs = open('/home/wcmckee/ps2.txt', 'r')
#oprd = opps.read()
#opzrd = opzs.read()
# +
#client = ImgurClient(oprd, opzrd)
# Example request
#items = client.gallery()
#for item in items:
# print(item.link)
#itz = client.get_album_images()
# +
#galim = client.get_image('SBaV275')
# +
#galim.size
# +
#gelim = client.get_album_images('LTDJ9')
# +
#gelim
# +
#from urlparse import urlparse
# +
#linklis = []
# -
# I need to get the image ids from each url. Strip the http://imgur.com/ from the string. The gallery id is the random characters after. if it's an album a is added. if multi imgs then , is used to seprate.
#
# Doesnt currently work.
#
# Having problems with mixed /a/etwet and wetfwet urls. Using .strip('/') to remove forward slash in front of path.
# +
#pathlis = []
# -
#for rdz in lisrgc:
# if 'http://imgur.com/' in rdz.url:
# print rdz.url
# parsed = urlparse(rdz.url)
## print parsed.path.strip('/')
# pathlis.append(parsed.path.strip('/'))
#for pared in parsed.path:
# print pared.strip('/')
#itgar = client.gallery_item(parsed.path.strip('/'))
#itz = client.get_album_images(parsed.path.strip('a/'))
# reimg = requests.get(rdz.url)
## retxt = reimg.text
# souptxt = BeautifulSoup(''.join(retxt))
# soupurz = souptxt.findAll('img')
# for soupuz in soupurz:
# imgurl = soupuz['src']
# print imgurl
# linklis.append(imgurl)
#try:
# imzdata = requests.get(imgurl)
# +
#pathlis
# +
#noalis = []
# +
#for pathl in pathlis:
# if 'a/' in pathl:
# print 'a found'
# else:
# noalis.append(pathl)
# +
#if 'a/' in pathlis:
# print 'a found'
#else:
# noalis.append(pathlis)
# -
#for noaz in noalis:
# print noaz
#itgar = client.gallery_item()
# +
#linklis
# +
#if '.jpg' in linklis:
# print 'yes'
#else:
# print 'no'
# -
#panz()
# Download every direct-linked i.imgur.com reference image into the current
# (reference) folder as <author>-reference.png.  Non-direct imgur links are
# skipped entirely -- see the TODO at the top of the file.
for rdz in lisrgc:
    (rdz.title)
    #a(rdz.url)
    if 'http://i.imgur.com' in rdz.url:
        #print rdz.url
        print (rdz.url)
        url = rdz.url
        # stream=True + copyfileobj streams to disk without holding the
        # whole image in memory.  NOTE(review): the file is named .png even
        # when the source is a .jpg, and the HTTP status is never checked.
        response = requests.get(url, stream=True)
        with open(str(rdz.author) + '-reference.png', 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
        del response
apsize = []   # (width, height) per image, parallel to aptype
aptype = []   # corresponding filenames
basewidth = 600  # NOTE(review): intended resize width; never applied below
imgdict = dict()
# Record the pixel size of every downloaded reference image.
for rmglis in os.listdir(rmgzdays):
    #print rmglis
    im = Image.open(rmglis)
    #print im.size
    imgdict.update({rmglis : im.size})
    #im.thumbnail(size, Image.ANTIALIAS)
    #im.save(file + ".thumbnail", "JPEG")
    apsize.append(im.size)
    aptype.append(rmglis)
#for imdva in imgdict.values():
    #print imdva
    #for deva in imdva:
        #print deva
    #    if deva < 1000:
    #        print 'omg less than 1000'
    #    else:
    #        print 'omg more than 1000'
    #        print deva / 2
#print imgdict.values
# Needs to update imgdict.values with this new number. Must halve height also.
# +
#basewidth = 300
#img = Image.open('somepic.jpg')
#wpercent = (basewidth/float(img.size[0]))
#hsize = int((float(img.size[1])*float(wpercent)))
#img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
#img.save('sompic.jpg')
# +
#os.chdir(metzdays)
# -
#for numz in apsize:
# print numz[0]
# if numz[0] > 800:
# print ('greater than 800')
# else:
# print ('less than 800!')
# Web-relative paths of today's reference images, consumed by the index
# page builder below.
reliz = []
for refls in os.listdir(rmgzdays):
    #print rmgzdays + refls
    reliz.append(iwcpath + '/' + refls)
len(reliz)
# Tweet each reference img in list, removing the item when it's tweeted so that same item isn't tweeted twice.
# Make new list of items to tweet, appending in new items when site is updated
for apt in aptype:
    print (apt)
# +
#opad = open('/home/wcmckee/ad.html', 'r')
# +
#opred = opad.read()
# +
#str2 = opred.replace("\n", "")
# +
#str2
# -
# +
# Build the index.html document with dominate: a header (logo, title,
# last-updated timestamp) followed by one <img> + caption per reference image.
doc = dominate.document(title='GetsDrawn')
with doc.head:
    link(rel='stylesheet', href='style.css')
    script(type ='text/javascript', src='script.js')
    #str(str2)
    # Header block.
    with div():
        attr(cls='header')
        h1('GetsDrawn')
        p(img('imgs/getsdrawn-bw.png', src='imgs/getsdrawn-bw.png'))
        #p(img('imgs/15/01/02/ReptileLover82-reference.png', src= 'imgs/15/01/02/ReptileLover82-reference.png'))
        h1('Updated ', strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
        #p(panz)
        p(bodycom)
with doc:
    # Ordered list of today's reference images (paths from reliz above).
    with div(id='body').add(ol()):
        for rdz in reliz:
            #h1(rdz.title)
            #a(rdz.url)
            #p(img(rdz, src='%s' % rdz))
            #print rdz
            p(img(rdz, src = rdz))
            p(rdz)
            #print rdz.url
            #if '.jpg' in rdz.url:
            #    img(rdz.urlz)
            #else:
            #    a(rdz.urlz)
            #h1(str(rdz.author))
        #li(img(i.lower(), src='%s' % i))
    # Footer with source/subreddit links.
    with div():
        attr(cls='body')
        p('GetsDrawn is open source')
        a('https://github.com/getsdrawn/getsdrawndotcom')
        a('https://reddit.com/r/redditgetsdrawn')
#print doc
# -
# Render the page and write it out.
docre = doc.render()
#s = docre.decode('ascii', 'ignore')
# Strip any non-ASCII characters before writing.
yourstring = docre.encode('ascii', 'ignore').decode('ascii')
indfil = ('/home/wcmckee/getsdrawndotcom/index.html')
# Bug fix: the original opened, wrote, and closed the same file twice in a
# row with identical content.  Write once, and let the context manager close
# the handle even if the write fails.
with open(indfil, 'w') as mkind:
    mkind.write(yourstring)
# +
#os.system('scp -r /home/wcmckee/getsdrawndotcom/ w<EMAIL>:/home/wcmckee/getsdrawndotcom')
# +
#rsync -azP source destination
# +
#updatehtm = raw_input('Update index? Y/n')
#updateref = raw_input('Update reference? Y/n')
#if 'y' or '' in updatehtm:
# os.system('scp -r /home/wcmckee/getsdrawndotcom/index.html w<EMAIL>:/home/wcmckee/getsdrawndotcom/index.html')
#elif 'n' in updatehtm:
# print 'not uploading'
#if 'y' or '' in updateref:
# os.system('rsync -azP /home/wcmckee/getsdrawndotcom/ w<EMAIL>:/home/wcmckee/getsdrawndotcom/')
# +
#os.system('scp -r /home/wcmckee/getsdrawndotcom/index.html <EMAIL>:/home/wcmckee/getsdrawndotcom/index.html')
# +
#os.system('scp -r /home/wcmckee/getsdrawndotcom/style.css <EMAIL>:/home/wcmckee/getsdrawndotcom/style.css')
# -
# Announce the randomly chosen reference image (twez built above) on Twitter.
my_bot.send_tweet(twez)
| GetsDrawnDotCom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="OOMFKFnd5Hkz"
# # **Importing Libraries**
# + id="xk92m06VxzkX"
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ParameterGrid
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.wrappers.scikit_learn import KerasClassifier
# + [markdown] id="drntyoLeiLB3"
# # **Preparing Data**
# + id="zuWQcMKBBqtn"
# Feature columns: stream gauge readings at two Patapsco River stations plus
# hourly weather observations; 'y' is the flood label (0/1, relabelled below).
columns = ['datetime','discharge_patapsco (catonsville)',
           'gage_height_patapsco (catonsville)', 'discharge_patapsco (elkridge)',
           'gage_height_patapsco (elkridge)', 'HourlyAltimeterSetting',
           'HourlyDryBulbTemperature',
           'HourlyPrecipitation', 'HourlyRelativeHumidity','HourlyStationPressure',
           'y']
df = pd.read_csv('test_train_2events.csv')
df = df[columns]
df['datetime'] = pd.to_datetime(df['datetime'])
# + colab={"base_uri": "https://localhost:8080/"} id="4MZQ6Tr-SsOJ" outputId="10e7ca8e-cb5b-4632-9f84-4f234f2f86f1"
# handling the cells in df['HourlyPrecipitation'] labeled as 'T'
# if the cell is 'T', replace 'T' with value of previous cell
#returns a list of indices for cells == 'T' in df['HourlyPrecipitation']
index = df.loc[df['HourlyPrecipitation'] == 'T', :].index
for i in index:
    if df['HourlyPrecipitation'][i] == 'T':
        #appending cells == 'T' with values of their previous cells
        df['HourlyPrecipitation'][i] = df['HourlyPrecipitation'][i-1]
# NOTE(review): 'T' presumably marks trace precipitation -- confirm.  The
# chained-indexing assignment triggers SettingWithCopy warnings, and i-1
# assumes 'T' never appears in the very first row.
# + colab={"base_uri": "https://localhost:8080/"} id="m9eiNuKxxqaD" outputId="1329859a-fb64-41b1-d763-8edf5c161946"
# changing labels in df['y']
# 0 is now 'no flood'. 1 is 'flood'
# this step is needed for the encoding later
# (string labels let LabelEncoder derive named classes for the reports.)
for i in df.index:
    if df['y'][i] == 0:
        df['y'][i] = 'no flood'
    if df['y'][i] == 1:
        df['y'][i] = 'flood'
# + id="-Q4E1FzlE5-G"
# Cast back to numeric now that the 'T' strings are gone.
df['HourlyPrecipitation'] = df['HourlyPrecipitation'].astype('float64')
# prcp data was in 'object' format due to the 'T' mislabeling
# + id="nZ-2JKrVXns_"
# splitting training and testing data
# (train = the 2018 flood event, test = the 2016 event; see note below.)
train = df[df['datetime'].dt.year == 2018]
train = train.drop(['datetime'],axis=1)
test = df[df['datetime'].dt.year == 2016]
test = test.drop(['datetime'],axis=1)
# + id="ucRzJigcZc1A"
# encoding the data
def encode(train, test):
    """Label-encode the 'y' column of both splits and drop it from the features.

    Returns (train_features, train_labels, test_features, test_labels,
    classes), where *classes* comes from the encoder fitted on the test
    split.

    NOTE(review): separate encoders are fitted on train and test, so the
    integer codes only agree when both splits contain the same label set --
    confirm that holds for this data.
    """
    enc_train = LabelEncoder().fit(train['y'])
    labels_train = enc_train.transform(train['y'])
    enc_test = LabelEncoder().fit(test['y'])
    labels_test = enc_test.transform(test['y'])
    classes = list(enc_test.classes_)
    return (train.drop(['y'], axis=1), labels_train,
            test.drop(['y'], axis=1), labels_test, classes)
# + id="bohONNUWZho1"
# for training and testing
train, labels_train, test, labels_test , classes = encode(train, test)
# + id="FnChjgahatWX"
# standardize train features
scaler = StandardScaler().fit(train.values)
scaled_train = scaler.transform(train.values)
# + id="dip-sAgSayws"
# split train data into train and validation
sss = StratifiedShuffleSplit(test_size=0.1, random_state=23)
for train_index, valid_index in sss.split(scaled_train, labels_train):
    X_train, X_valid = scaled_train[train_index], scaled_train[valid_index]
    y_train, y_valid = labels_train[train_index], labels_train[valid_index]
# NOTE(review): with the default n_splits this loop overwrites the arrays on
# each pass and keeps only the final split -- confirm one split is intended.
# + id="J9nqv-RimWvr"
# standardize test features
# NOTE(review): a *separate* scaler is fitted on the test set here; reusing
# the train-fitted `scaler` would be the usual leakage-free choice -- confirm.
scaler_test = StandardScaler().fit(test.values)
scaled_test = scaler_test.transform(test.values)
# + id="dr6XAVAacmK-"
# for confusion matrix and scoring
X_test = scaled_test
y_test0 = labels_test
# + id="nrLGRBVwtqPS"
# for cross validation usage
x_cross_val = pd.concat([test,train],axis=0) # test is 2016 event, train is 2018
scaler_cross_val = StandardScaler().fit(x_cross_val.values)
# Bug fix: the original transformed with the train-only `scaler`, leaving the
# freshly fitted `scaler_cross_val` unused -- almost certainly a typo.  Use
# the scaler that was actually fitted on the combined data.
x = scaler_cross_val.transform(x_cross_val.values)
# Labels concatenated in the same test-then-train order as the features.
y = np.hstack((labels_test,labels_train))
# + id="irCHhvPba2kc"
nb_features = 3 # number of features per features type (discharge, temp., pressure)
nb_class = len(classes)
# reshape train data
X_train_r = np.zeros((len(X_train), nb_features, 3))
X_train_r[:, :, 0] = X_train[:, :nb_features]
X_train_r[:, :, 1] = X_train[:, nb_features:6]
X_train_r[:, :, 2] = X_train[:, 6:]
# reshape validation data
X_valid_r = np.zeros((len(X_valid), nb_features, 3))
X_valid_r[:, :, 0] = X_valid[:, :nb_features]
X_valid_r[:, :, 1] = X_valid[:, nb_features:6]
X_valid_r[:, :, 2] = X_valid[:, 6:]
# reshape test data
X_test_r = np.zeros((len(X_test), nb_features, 3))
X_test_r[:, :, 0] = X_test[:, :nb_features]
X_test_r[:, :, 1] = X_test[:, nb_features:6]
X_test_r[:, :, 2] = X_test[:, 6:]
# reshape cross validation data
X_r = np.zeros((len(x), nb_features, 3))
X_r[:, :, 0] = x[:, :nb_features]
X_r[:, :, 1] = x[:, nb_features:6]
X_r[:, :, 2] = x[:, 6:]
# + id="DFYeIzkgerPA"
y_train = np_utils.to_categorical(y_train, nb_class)
y_valid = np_utils.to_categorical(y_valid, nb_class)
# + [markdown] id="_QkAKZiiyv4k"
# # **Building and Training the Model**
# + id="a5aHLBQWeMdT"
# Keras model with one Convolution1D layer
# unfortunately a larger number of convolutional layers, filters and filter lengths
# doesn't give better accuracy
def create_model():
    """Build the 1-D CNN used by the KerasClassifier wrapper below.

    One Conv1D layer over the (nb_features, 3) input, flattened into two
    dense ReLU layers, with a softmax head over ``nb_class`` classes; trained
    with SGD (Nesterov momentum) and categorical cross-entropy.
    """
    layers = [
        Convolution1D(filters = 3, kernel_size = 1,
                      input_shape=(nb_features, 3)),
        Activation('relu'),
        Flatten(),
        Dropout(0.4),
        Dense(2048, activation='relu'),
        Dense(1024, activation='relu'),
        Dense(nb_class),
        Activation('softmax'),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    optimizer = SGD(lr=0.01, nesterov=True, decay=1e-6, momentum=0.9)
    net.compile(loss='categorical_crossentropy',
                optimizer=optimizer, metrics=['accuracy'])
    return net
# Wrap the Keras model in the scikit-learn API so cross_val_score works below.
model = KerasClassifier(build_fn= create_model,epochs=100,
                        shuffle=False,verbose=2)
# + colab={"base_uri": "https://localhost:8080/"} id="ASbRKlaOevid" outputId="7226b27e-07a1-4dd3-fe1b-84fad3b7f0ff"
model.fit(X_train_r, y_train,validation_data=(X_valid_r, y_valid))
# + [markdown] id="TjxekO5v0Tap"
# # **Evaluation**
# + id="A7C0KIIHlkP8"
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
# + colab={"base_uri": "https://localhost:8080/"} id="Gb_LBjPLqgq9" outputId="f12307ba-811f-4d3b-e77a-97b12112c375"
# Predict on the held-out 2016 event.
y_pred = model.predict(X_test_r)
# + colab={"base_uri": "https://localhost:8080/", "height": 532} id="yRJlDi5a8YpU" outputId="4c339ccb-382f-44a0-e35b-6f0620fb8687"
#plotting the confusion matrix
cm = confusion_matrix(labels_test,y_pred)
plt.figure(figsize=(10,8))
sns.heatmap(cm,cmap='coolwarm',annot=True,annot_kws={'size':46})
plt.title('Confusion Matrix',fontsize=33)
# + colab={"base_uri": "https://localhost:8080/"} id="baN0UX2W0cpJ" outputId="9ceb0eb4-e387-4e28-b5ce-2c3d92d6dd6a"
#scoring the model
score = model.score(X_test_r,y_test0)
print("Test accuracy = %f%%" % (score*100))
# + colab={"base_uri": "https://localhost:8080/"} id="9nlF5YmMpC21" outputId="4b661e39-a672-4a21-d7f9-3c8ce340f2bc"
# 5-fold CV over the combined 2016+2018 data (X_r/y built above).
accuracy_scores = cross_val_score(model, X_r, y,cv= 5,
                                  scoring = 'accuracy')
# + colab={"base_uri": "https://localhost:8080/"} id="Fi0s0kPM5n2N" outputId="0fa81cba-7805-4777-d0d4-eeded9841fa6"
print("accuracy scores = {}, mean = {}, stdev = {}".format(accuracy_scores, np.mean(accuracy_scores), np.std(accuracy_scores)))
# + [markdown] id="IBzGHSld5oA8"
# # **CNN vs. Synthetic Data**
# + id="oYjgoxX05mwy"
# Evaluate the trained model on GAN-synthesized samples.
df_gan = pd.read_csv("gan_data3.csv")
df_gan = df_gan.drop(['Unnamed: 0','HourlyDewPointTemperature','HourlyVisibility'],axis=1)
# + id="xhABDEPG6bhJ"
x_gan = df_gan.drop(['y'],axis=1)
# standardize train features
# NOTE(review): a new scaler is fitted on the GAN data rather than reusing
# the training scaler -- confirm that is intentional.
scaler_gan = StandardScaler().fit(x_gan.values)
scaled_gan= scaler_gan.transform(x_gan.values)
# + id="XPtyW_mJ8I9I"
# reshape gan data (same 3x3 layout and hard-coded slices as above)
X_gan_r = np.zeros((len(scaled_gan), nb_features, 3))
X_gan_r[:, :, 0] = scaled_gan[:, :nb_features]
X_gan_r[:, :, 1] = scaled_gan[:, nb_features:6]
X_gan_r[:, :, 2] = scaled_gan[:, 6:]
# + id="42m2C0-s-E_0"
y_gan = df_gan['y'].astype('int64').values
# + colab={"base_uri": "https://localhost:8080/"} id="zsakh_o07_zj" outputId="05773eb5-1cee-4abc-f462-84decf96ccc6"
y_pred_gan = model.predict(X_gan_r)
# + colab={"base_uri": "https://localhost:8080/", "height": 532} id="pTpSiHAg9Qt1" outputId="57fe80a9-a80f-4b64-e564-dd581da08a25"
#plotting the confusion matrix for the gan data output
cm = confusion_matrix(y_gan,y_pred_gan)
plt.figure(figsize=(10,8))
sns.heatmap(cm,cmap='coolwarm',annot=True,annot_kws={'size':46})
plt.title('Confusion Matrix',fontsize=33)
# + colab={"base_uri": "https://localhost:8080/"} id="LkKCGdUi_Aue" outputId="8108d85e-6364-4624-d103-0012ebabf19e"
#scoring the model with GAN data
score_gan = model.score(X_gan_r,y_gan)
print("Test accuracy = %f%%" % (score_gan*100))
| CNN_vs__GAN3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="SuUqYbLTnSHU"
import time
from datetime import datetime
from dateutil.tz import tzutc
from ast import literal_eval
import re
import numpy as np
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
import joblib
import pickle
from sklearn import preprocessing
from scipy import spatial
from tensorflow import keras
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings("ignore")
# + colab={"base_uri": "https://localhost:8080/", "height": 292} id="zdeo_tiyofe-" outputId="31d4fc9d-49ea-4f65-f395-e5dd264a5615"
# add test cases here and in videos/test csv
# Test patient id plus the two input tables; absolute /data paths, so this
# expects the original deployment environment.
patient = 26207
patient_info = pd.read_csv('/data/patient_info_CONFIDENTIAL.csv')
videos = pd.read_csv('/data/videos_test.csv').set_index('Unnamed: 0')
videos.head()
# + id="z42tHs2EpNom"
# Keep demographic/engagement columns and one-hot encode the categoricals.
patient_info = patient_info[['patient_id', 'age', 'sex', 'has_bh_specialist', 'total_activities_done', 'unique_days_with_activity']]
patient_info = pd.get_dummies(patient_info, columns = ['sex', 'has_bh_specialist'])
# Join watch events with patient info, then aggregate per video id.
big = patient_info.merge(videos, on = "patient_id")
video_stats = big.groupby(['video_id']).mean()
video_features = videos.groupby('video_id').mean()
# Enrich per-video features with the mean viewer age and the share of male
# viewers (sex_Male is a 0/1 dummy, so its mean is a proportion).
video_features['avg_age'] = video_stats['age']
video_features['gender'] = video_stats['sex_Male']
# + id="4sMS8xz_1OGg"
# Normalize, PCA
# Min-max scale every video feature to [0, 1], then project into the
# lower-dimensional space of the pre-fitted PCA model.
cols = list(video_features.columns)
x = video_features.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
min_max_scaler.fit(x)
x_scaled = min_max_scaler.transform(x)
video_features = pd.DataFrame(x_scaled)
dims = len(video_features.columns)
pca = joblib.load('/models/video_pca.pkl')
reduced_movie_features = pca.transform(video_features)
reduced_movie_features = pd.DataFrame(reduced_movie_features)
# NOTE(review): re-indexing with video_stats' index assumes the scaled rows
# are still in the same video_id order — verify.
reduced_movie_features = reduced_movie_features.set_index(video_stats.index.values)
# + id="pFzjBvAu8aqk"
# Build per-patient average features over everything they watched.
patient_features = big.groupby(['patient_id']).mean()
patient_features = patient_features [['age', 'sex_Female', 'sex_Male', 'has_bh_specialist_False', 'has_bh_specialist_True',
'length', 'video_created_time', 'video_views', 'primary_category_ADHD',
'primary_category_Anxiety', 'primary_category_Cognitive Behavioral Therapy',
'primary_category_Depression', 'primary_category_Managing Pain',
'primary_category_Mindfulness', 'primary_category_New & Expecting Mothers',
'primary_category_PTSD', 'primary_category_Sleep', 'primary_category_Stress',
'primary_category_Substance Use', 'primary_category_Yoga']]
# Drop patients with missing values and remember their ids; keep an
# unscaled copy for inspection.
patient_features = patient_features.dropna()
patient_index = patient_features.index.values
patient_features_unscaled = patient_features.copy()
cols = list(patient_features.columns)
x = patient_features.values #returns a numpy array
# Min-max scale, then project with the pre-fitted user PCA model.
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
patient_features = pd.DataFrame(x_scaled)
user_pca = joblib.load('/models/user_pca.pkl')
reduced_patient_features = user_pca.transform(patient_features)
reduced_patient_features = pd.DataFrame(reduced_patient_features)
# Restore patient ids as the index on both the reduced and scaled frames.
reduced_patient_features = reduced_patient_features.set_index(patient_index)
patient_features = patient_features.set_index(patient_index)
# + id="8Zh415N5n8uS"
from scipy import spatial
# Video ids aligned with the per-video feature rows, plus the pre-trained
# recommendation network.
vids = video_stats.index.values
model = keras.models.load_model('/models/model.h5')
def get_closest_user(user, k, pca):
    """For a given user, return the k nearest neighbors in the PCA feature space.

    params:
        user - id of the user in question (int)
        k - number of nearest neighbors
        pca - fitted PCA object used for the transform.

    Fixes over the original: ``list(DataFrame)`` yields *column labels*, not
    the user's feature row, so the query vector was wrong; and
    ``argpartition(...)[1:k+1]`` is not sorted, so the slice did not pick the
    nearest neighbors.
    """
    patients = patient_features.index.values
    # Project every patient into PCA space, indexed by patient id.
    patient_pca = pd.DataFrame(pca.transform(patient_features), index=patients)
    # The query user's actual feature row.
    # NOTE(review): assumes patient ids are unique in the index.
    target = patient_pca.loc[user].values
    patient_similarity = [spatial.distance.cosine(target, row)
                          for row in patient_pca.values]
    # Ascending distances; index 0 is the user itself (distance 0), so skip
    # it and take the next k.
    closest_indices = np.argsort(patient_similarity)[1:k + 1]
    return patients[closest_indices]
def get_closest_movie(movie, user, k, pca):
    """For a given movie vector, return the k nearest movies in PCA space
    together with their cosine distances.

    params:
        movie = raw feature vector of the (average) movie
        user = user id (currently unused; kept for interface compatibility)
        k = number of nearest neighbors
        pca = fitted PCA object

    Fixes over the original: a single sample must be shaped ``(1, n_features)``
    for ``transform`` — ``reshape(-1, 1)`` produced the transpose — and
    ``argpartition(...)[1:k+1]`` is not sorted, so the slice did not pick the
    nearest movies.
    """
    # Project every video into PCA space, indexed by video id.
    video_pca = pd.DataFrame(pca.transform(video_features), index=vids)
    transformed_movie = pca.transform(movie.reshape(1, -1))[0]
    video_similarity = np.array(
        [spatial.distance.cosine(transformed_movie, row) for row in video_pca.values])
    # Ascending distances; skip index 0 (the closest match, normally the
    # query movie itself) and take the next k, as the original slice intended.
    closest_indices = np.argsort(video_similarity)[1:k + 1]
    return vids[closest_indices], video_similarity[closest_indices]
def nn_predict(user):
    """Predict the indices of the top-10 recommended videos for *user*.

    Fix over the original: the row selection hard-coded patient ``26207``,
    silently ignoring the ``user`` argument.
    """
    # Project the (already min-max scaled) patient features into PCA space.
    reduced = pd.DataFrame(user_pca.transform(patient_features), index=patient_index)
    # Select the requested user's reduced feature row(s).
    user_features = reduced[reduced.index.values == user]
    # The network emits one score per video; take the ten best, best first.
    predictions = model.predict(user_features)[0]
    return predictions.argsort()[-10:][::-1]
# + colab={"base_uri": "https://localhost:8080/"} id="1qQQKkPwnt9p" outputId="cb257cc9-b78b-4275-d7e2-f4d990132584"
# Print the recommended videos for the test patient, looking rows up in the
# raw watch-events table.
recommendations = nn_predict(patient)
vids_orig = pd.read_csv('/data/video_watched_events_CONFIDENTIAL.csv')
print('Based on your previous watch history, we recommend:')
print()
for rec in recommendations:
    # NOTE(review): `rec` is a position in the model's output vector but is
    # used as a row label in vids_orig — confirm the two are aligned.
    print(vids_orig.loc[rec, :].notes + ': ' + vids_orig.loc[rec, :].url)
| src/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %env CUDA_VISIBLE_DEVICES=0
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# %cd ..
from fastai.text import *
from multifit.datasets import ULMFiTDataset, Dataset
import multifit
import warnings
warnings.filterwarnings("ignore") # to ignore pytorch 1.3 warnings triggered by older fastai
# # CLS JA music
# +
# #!python prepare_cls.py https://storage.googleapis.com/ulmfit/cls
# -
# Load the pretrained Japanese MultiFiT model and inspect its architecture.
exp = multifit.from_pretrained('ja_multifit_paper_version')
exp.arch
# Build the CLS ja-music classification dataset with the LM's tokenizer.
cls_dataset = exp.arch.dataset(Path('data/cls/ja-music'), exp.pretrain_lm.tokenizer)
cls_dataset.lang
cls_dataset.load_clas_databunch(bs=exp.finetune_lm.bs).show_batch()
# Fine-tune the language model on the target corpus, then reload it.
exp.finetune_lm.train_(cls_dataset)
exp.load_(cls_dataset.cache_path/exp.pretrain_lm.name).finetune_lm
# Train the classifier with eight different seeds to estimate run variance.
exp.classifier.train_(seed=0)
exp.classifier.train_(seed=1)
exp.classifier.train_(seed=2)
exp.classifier.train_(seed=3)
exp.classifier.train_(seed=4)
exp.classifier.train_(seed=5)
exp.classifier.train_(seed=6)
exp.classifier.train_(seed=7)
# # Results
def get_results(exp_path):
    """Load the classifier saved under *exp_path* and return its cached
    validation metrics, annotated with the run's seed and fp16 flag."""
    classifier = multifit.ULMFiT().load_(exp_path, silent=False).classifier
    metrics = classifier.validate(use_cache=True)
    metrics.update(seed=classifier.seed, fp16=classifier.fp16)
    return metrics
# Collect metrics from every seeded run directory and tabulate them,
# sorted by validation accuracy.
results = [get_results(exp_path) for exp_path in cls_dataset.cache_path.glob(exp.pretrain_lm.name+"seed*")]
results_df = pd.DataFrame.from_records(results)
results_df.sort_values(["valid accuracy"])[["name", "seed", "test accuracy", "valid accuracy"]]
| notebooks/CLS-JA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:gsoc_ensembl]
# language: python
# name: conda-env-gsoc_ensembl-py
# ---
# +
####################################################################################################
# Copyright 2019 <NAME> and EMBL-European Bioinformatics Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
# -
# ## Loading all gene IDs from the csv file
import csv
import pandas as pd
import numpy as np
import json
# +
# Read every Ensembl gene id from the csv into a working list.
path = '/all_gene_data/all_gene_ids.csv'
df = pd.read_csv(path)
gene_ids = df['All gene ID'].tolist()
cleaned_ids = []
cleaned_ids = gene_ids
# -
# ## calculating time taken to open 1.05GB json file
# ## 'read_content' variable (given in below cell) contains the json response received
# %%time
# Load the merged gene-data dict (gene id -> Ensembl lookup response, ~1GB).
with open("/merged_all_gene_data(dict).json") as access_json:
    read_content = json.load(access_json)
# ## Get all gene IDs having biotype as 'lincRNA'
# Collect every gene id whose biotype is annotated as lincRNA.
lincrna_ids = []

def get_all_lincrna_ids():
    """Append to ``lincrna_ids`` every id in ``cleaned_ids`` whose entry in
    ``read_content`` has biotype 'lincRNA'."""
    for gene_id in cleaned_ids:
        if read_content[gene_id]['biotype'] == 'lincRNA':
            lincrna_ids.append(gene_id)

get_all_lincrna_ids()
len(lincrna_ids)
# ## Loading all lincRNA IDs from csv file to a list
# Replace the working id list with just the lincRNA ids from the csv.
cleaned_ids = []
path = '/all_lincRNA_data/all_lincrna_ids.csv'
df = pd.read_csv(path)
cleaned_ids = df['All lincRNA ID'].tolist()
# ## Total lincRNA gene IDs = 7690
len(cleaned_ids)
from tqdm import tqdm
# Ids with usable data vs ids whose JSON entry is null (filled below).
gene_ids = []
null_gene_ids = []
def get_gene_data():
    """Partition ``cleaned_ids`` by whether ``read_content`` has data for them.

    Ids whose JSON entry is null go to ``null_gene_ids`` (each wrapped in a
    single-element list, as the later printing loop expects); the rest go to
    ``gene_ids``.  Cleanups over the original: direct iteration instead of
    ``range(len(...))``, ``is None`` instead of ``== None``, and the dead
    commented-out extraction lines removed.
    """
    count = 0
    for gene_id in tqdm(cleaned_ids):
        if read_content[gene_id] is None:
            null_gene_ids.append([gene_id])
        else:
            gene_ids.append(gene_id)
        # Sanity counter: how many ids are actually present in the dump.
        if gene_id in read_content:
            count = count + 1
get_gene_data()
# ## 'null_gene_ids' variable contains the IDs having null values
# Report ids that had no data in the JSON dump.
print('No. of Null IDs are {0}'.format(len(null_gene_ids)))
print('Null IDs are :')
for i in range(len(null_gene_ids)):
    print(null_gene_ids[i])
# Continue with only the non-null ids.
cleaned_ids = []
cleaned_ids = gene_ids
print(len(cleaned_ids))
# Accumulators filled by get_transcript_data() below.
transcript_strand = []
transcript_seq_region_name = []
# ## Below function [get_transcript_data() ] to extract 'transcript' data. Data Extracted are :
# 1. transcript id
# 2. transcript start
# 3. transcript end
# 4. transcript biotype
# +
def get_transcript_data():
    """Append the strand and seq_region_name of every transcript of every
    gene in ``cleaned_ids`` to the module-level accumulator lists."""
    for gene_id in cleaned_ids:
        for transcript in read_content[gene_id]['Transcript']:
            transcript_strand.append(transcript['strand'])
            transcript_seq_region_name.append(transcript['seq_region_name'])
# -
get_transcript_data()
# The two lists grow in lock-step, so the lengths must match.
print(len(transcript_strand))
print(len(transcript_seq_region_name))
# Accumulators filled by get_exon_data() below.
exon_strand = []
exon_seq_region_name = []
# ## Below function [get_exon_data() ] to extract 'exon' data. Data Extracted are :
# 1. exon id
# 2. exon start
# 3. exon end
# +
def get_exon_data():
    """Append the strand and seq_region_name of every exon of every
    transcript of every gene in ``cleaned_ids`` to the accumulator lists."""
    for gene_id in tqdm(cleaned_ids):
        for transcript in read_content[gene_id]['Transcript']:
            for exon in transcript["Exon"]:
                exon_strand.append(exon['strand'])
                exon_seq_region_name.append(exon['seq_region_name'])
# -
get_exon_data()
# The two lists grow in lock-step, so the lengths must match.
print(len(exon_seq_region_name))
print(len(exon_strand))
# # Transcript data correction
import pandas as pd
# Add the freshly collected strand / seq-region columns to the transcript
# table and rewrite the csv in place.
# NOTE(review): assumes the csv rows are in the same order the transcripts
# were iterated in get_transcript_data() — verify before trusting the output.
df = pd.read_csv('/all_lincRNA_data/all_lincrna_transcript_data.csv', index_col=0)
df.head(3)
df['Strand'] = transcript_strand
df['Seq region Name'] = transcript_seq_region_name
df.head(4)
# Reorder columns so the new ones sit next to the identifiers.
df = df[['SNO', 'Gene ID', 'Transcript ID', 'Biotype', 'Strand', 'Seq region Name', 'Transcript Start', 'Transcript End', 'Transcript Length','No. of Exons']]
df.head(3)
len(df['SNO'])
df.to_csv('/all_lincRNA_data/all_lincrna_transcript_data.csv')
# # Exon data correction below
import pandas as pd
# Same correction for the exon table (row order assumption as above).
df = pd.read_csv('/all_lincRNA_data/all_lincrna_exon_data.csv', index_col=0)
len(df)
df['Strand'] = exon_strand
df['Seq region Name'] = exon_seq_region_name
df.head(3)
df = df[['SNO', 'Gene ID', 'Transcript ID', 'Exon ID', 'Strand','Seq region Name','Exon Start', 'Exon End', 'Exon Length']]
df.head(2)
df.to_csv('/all_lincRNA_data/all_lincrna_exon_data.csv')
# # Correcting sequence data
import pandas as pd
# Same correction for the transcript-with-sequences table.
df = pd.read_csv('/all_lincRNA_data/all_lincrna_transcript_data_with_sequences.csv', index_col=0)
df.head(3)
df['Strand'] = transcript_strand
df['Seq region Name'] = transcript_seq_region_name
df.head(3)
df = df[['SNO', 'Gene ID', 'Transcript ID', 'Biotype', 'Strand', 'Seq region Name', 'Transcript Start', 'Transcript End', 'Transcript Length','No. of Exons', 'Length of Sequences', '| Seq_len - Trans_len |','Match ?', 'Sequences']]
df.head(2)
df.to_csv('/all_lincRNA_data/all_lincrna_transcript_data_with_sequences.csv')
| Ensembl-analysis/data_acquisition/ensembl_api_data/all_gene&lincRNA_data/python_scripts/get_strand+SeqRegionName_all_lincRNA_Ensembl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Changelog
# #### 1. Raw data without normalization; two same-padding conv layers (k_size=3, padding=1), stride-2 pooling, two fully connected layers — 88.7% accuracy (Linux platform)
# #### 2. Same as 1 but with normalization — 88.84% accuracy (Linux platform)
# #### 3. With the regularization from 2, kernel size changed to 5 with padding 2 — 89.15% accuracy (Windows platform)
# #### 4. On top of 3, optimizer switched from SGD to Adam — 89.59% accuracy (Linux platform)
# #### 5. On top of 2, optimizer switched from SGD to Adam — 89.9% accuracy (Linux platform)
# #### 6. Added net.eval() — accuracy rose to 90.94% (Linux platform)
# #### 7. Added one more fully connected hidden layer — accuracy rose to 91.75% (Linux platform)
# #### 8. Fully connected hidden-layer neuron ratio adjusted to 9:3:2 and a new conv layer added — 91.89% accuracy (Linux platform)
# #### 9. Removed the BN layer after the first conv layer; third conv layer's receptive field changed to 5x5 — 92.07% accuracy (Linux platform)
# #### 10. Increased the receptive field — 92.28% accuracy (Linux platform)
# #### 11. Increased the output feature-map channels of the second and third conv layers — 92.49% accuracy (Linux platform)
# +
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# FashionMNIST label index -> human-readable class name.
label_to_description = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress',
4: 'Coat', 5: 'Sandal', 6: 'Shirt',
7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot',
}
# Load the preprocessed training tensors (images, labels).
path_to_datasets = '/home/lor/Datasets/FashionMNIST/FashionMNIST/processed/training.pt'
# path_to_datasets = 'E:/data/FashionMNIST/FashionMNIST/processed/training.pt'
samples = torch.load(path_to_datasets)
features = samples[0]
targets = samples[1]
# Sanity checks on shapes and the label range.
print(features.shape, targets.shape)
print(targets.max())
print(targets.min())
# Preview two samples with their class names.
ax = plt.subplot(2, 2, 1)
plt.imshow(np.array(features[3]), cmap='gray')
ax.set_title(label_to_description[targets.storage()[3]])
ax = plt.subplot(2, 2, 2)
plt.imshow(np.array(features[4]), cmap='gray')
ax.set_title(label_to_description[targets.storage()[4]])
plt.show()
# plt.title(label_to_description[targets.storage()[3]])
# label_to_description[targets.storage()[3]]
from torch.utils.data import Dataset, DataLoader
class UserDataset(Dataset):
    """Wraps raw FashionMNIST tensors; yields (label, image) pairs with each
    image scaled per-sample to the [0, 1] range."""

    def __init__(self, features, targets):
        self.features = features
        self.targets = targets

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, idx):
        # Add the channel dimension and convert to float.
        image = self.features[idx].unsqueeze(0).float()
        lo, hi = torch.min(image), torch.max(image)
        # Per-sample min-max normalisation.
        image = (image - lo) / (hi - lo)
        return (self.targets[idx], image)
train_dataset = UserDataset(features, targets)
train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
# Peek at one batch: one_iter[0] holds the labels, one_iter[1] the images.
one_iter = next(iter(train_dataloader))
one_iter[0].reshape(-1)
# Show the first ten images of the batch with their class names.
for i in range(10):
    ax = plt.subplot(2, 5, i + 1)
    plt.imshow(one_iter[1][i][0], cmap='gray')
    plt.axis('off')
    ax.set_title(label_to_description[one_iter[0].storage()[i]])
# +
class Network(nn.Module):
    """CNN for 28x28 FashionMNIST images: two conv stages followed by a
    three-layer fully connected classifier over the 10 classes."""

    def __init__(self):
        super(Network, self).__init__()
        # Stage 1: 1 -> 8 -> 18 channels, then 2x2 max-pool (28x28 -> 14x14).
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=(3, 3), stride=1, padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(),
            nn.Conv2d(8, 18, kernel_size=(5, 5), stride=1, padding=2),
            nn.BatchNorm2d(18),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        )
        # Stage 2: 18 -> 26 channels, pool again (14x14 -> 7x7).
        self.conv2 = nn.Sequential(
            nn.Conv2d(18, 26, kernel_size=(5, 5), stride=1, padding=2),
            nn.BatchNorm2d(26),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        )
        # Classifier head over the flattened 7*7*26 feature map.
        self.fc1 = nn.Linear(7 * 7 * 26, 424)
        self.fc2 = nn.Linear(424, 141)
        self.fc3 = nn.Linear(141, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.reshape(features.shape[0], -1)
        # ReLU between the fully connected layers; fc3 outputs raw logits.
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def weight_init(m):
    """Kaiming-normal init for conv/linear weights, zero biases.

    Intended for use with ``net.apply(weight_init)``; both branch types of
    the original performed the identical initialisation, so they are merged.
    """
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
        nn.init.zeros_(m.bias)
# +
# Use the GPU when available; the commented alternatives record the earlier
# experiments (custom init, SGD) from the changelog above.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Network()
# net.apply(weight_init)
net.to(device)
loss = nn.CrossEntropyLoss()
# optimizer = optim.SGD(net.parameters(), lr=1e-3)
optimizer = optim.Adam(net.parameters(), lr=1e-3)
total_step = len(train_dataloader)
# -
num_epochs = 10
net.train()  # training mode: BatchNorm uses batch statistics
for epoch in range(num_epochs):
    for i, (target, feature) in enumerate(train_dataloader):
        # Move the mini-batch to the selected device.
        target, feature = target.to(device), feature.to(device)
        out = net(feature)
        # out = out.reshape(-1)
        target = target.reshape(-1)
        # CrossEntropyLoss expects raw logits and integer class targets.
        criterion = loss(out, target)
        optimizer.zero_grad()
        criterion.backward()
        optimizer.step()
        # Log the running loss every 100 mini-batches.
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, criterion.item()))
### Evaluation on the held-out test set
# test_path = 'E:/data/FashionMNIST/FashionMNIST/processed/test.pt'
test_path = '/home/lor/Datasets/FashionMNIST/FashionMNIST/processed/test.pt'
test_dataset = torch.load(test_path)
print(test_dataset[0].shape)
print(test_dataset[1].shape)
test_dataset = UserDataset(test_dataset[0], test_dataset[1])
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=4)
# Count correct predictions over the whole test set; derive the total from
# the dataset instead of hard-coding 10000.
total = len(test_dataset)
pred_count = 0
net.eval()  # eval mode: BatchNorm uses running statistics
with torch.no_grad():  # no gradients needed during evaluation
    for (target, feature) in test_dataloader:
        target = target.to(device)
        feature = feature.to(device)
        out = net(feature)
        # argmax over the class dimension gives the predicted label.
        pred_count = pred_count + (torch.argmax(out, 1) == target).sum().item()
print("Accuracy: {0}%".format(100 * pred_count / total))
# Save the model
torch.save(net.state_dict(), './net.pt')
# Load the model back into a fresh instance to verify the round-trip.
oth_net = Network()
oth_net.load_state_dict(torch.load('./net.pt'))
oth_net.eval()
| fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import string
import matplotlib.pyplot as plt
import re
# Get data from files (absolute Windows paths — runs only on the original machine)
headlines = pd.read_csv(r"C:\Users\Neal\OneDrive - University of Iowa\Spring 2021\GEOG 3760\Content Analysis\new_list.csv")
# Use a context manager so the file handle is closed (the original leaked it).
with open(r"C:\Users\Neal\OneDrive - University of Iowa\Spring 2021\GEOG 3760\Content Analysis\text_file1.txt") as _fh:
    article_bodies = _fh.read()
# Create dictionary of headline terms
from collections import Counter  # one-pass counting instead of O(n^2) list.count

# Concatenate all headlines, lower-cased and space-separated (a generator
# join is linear; the original repeated `+=` loop was quadratic).
stringtext = "".join((str(i) + " ").lower() for i in headlines["Headline"])
headlines_clear = stringtext.translate(str.maketrans('', '', string.punctuation))
headlines_wordlist = headlines_clear.split()
# Word -> frequency; Counter.most_common sorts by count descending with a
# stable tie order, matching the original sorted(..., reverse=True).
headlines_wordfreq = Counter(headlines_wordlist)
headlines_sorted = dict(headlines_wordfreq.most_common())
# Get count of term in headlines (a Counter returns 0 for absent terms
# instead of raising KeyError).
headlines_wordfreq['tornado']
# +
# Create bar chart for word frequency
# Plot the 20 most frequent headline words that are longer than four chars.
df_headlines = pd.DataFrame.from_dict(headlines_sorted,orient='index')
df_headlines.columns = df_headlines.columns.map(str)
df = df_headlines[df_headlines.index.str.len()>4]
plt.bar(df.index[:20],df["0"][:20])
plt.ylabel('Word Count')
plt.xlabel('Word')
plt.xticks(df.index[:20], df.index[:20], rotation='vertical')
plt.show()
# -
# Find keyword in article bodies and display immediate surrounding text
keyword = 'happy'
# Articles are delimited by the literal string "News;".
articles_split = re.split("News;",article_bodies)
for article in articles_split:
    if keyword in article:
        index_val = article.find(keyword)
        sentence = ""
        # Show ~30 chars before and ~45 after the first match; try/except
        # tolerates indices past the end of the article.
        # NOTE(review): negative indices wrap to the end of the string, so a
        # match near the start of an article may show trailing text — verify.
        for letter in range(index_val-30, index_val+45):
            try: sentence += article[letter]
            except: continue
        print(str(sentence),"\n")
# Get full sentences containing terms of interest
article_search = [text.split('. ') for text in article_bodies.split('. ') if 'sad' in text]
for line in article_search:
    # Strip everything but alphanumerics and spaces for readability.
    print(re.sub("[^0-9A-Za-z ]", "", str(line)),"\n")
# Create dictionary of terms from full articles
from collections import Counter  # one-pass counting instead of O(n^2) list.count

articles_clear = article_bodies.translate(str.maketrans('', '', string.punctuation))
articles_split = articles_clear.split()
# Word -> frequency across all article bodies; most_common matches the
# original sorted(..., key=count, reverse=True) ordering.
articles_wordfreq = Counter(articles_split)
articles_sorted = dict(articles_wordfreq.most_common())
# Frequent filler words / transcript artifacts to exclude from the chart below.
common_words = ['there','going','about','right','their','because','still','other','think',
                'really','those','could','after','UNIDENTIFIED','these','thats','VIDEO',
                'where','COOPER','today','through','trying','youre','theyre','theres',
                'which','would','Thats','BEGIN','There','around','being']
# +
# Create bar chart for word frequency
# Drop the stop-words listed above, then chart the 20 most frequent
# remaining words longer than four characters.
copy1 = articles_sorted.copy()
copy2 = copy1.copy()
# Iterate a second copy so removing keys from copy1 is safe.
for key in copy2.keys():
    if key in common_words:
        copy1.pop(key)
df_articles = pd.DataFrame.from_dict(copy1,orient='index')
df_articles.columns = df_articles.columns.map(str)
df = df_articles[df_articles.index.str.len()>4]
plt.bar(df.index[:20],df["0"][:20])
plt.ylabel('Word Count')
plt.xlabel('Word')
plt.xticks(df.index[:20], df.index[:20], rotation='vertical')
plt.show()
| Example_Code/word_script.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Thurunany/2021_EAD/blob/main/POO_5_Thuany_3info1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="IECQuKfgd4F_" outputId="27cf6be6-15c2-46e8-e26d-4e416717b117"
class Usuario():
    """Simple user holding a first and a last name."""

    # Class-level defaults, kept for interface compatibility.
    primeiroNome = ""
    ultimoNome = ""

    def __init__(self, primeiroNome, ultimoNome):
        self.primeiroNome = primeiroNome
        self.ultimoNome = ultimoNome

    def getNomeCompleto(self):
        # Full name is "<first> <last>".
        return "{} {}".format(self.primeiroNome, self.ultimoNome)

usuario1 = Usuario("Johnny", "Bravo")
print(usuario1.getNomeCompleto())
| POO_5_Thuany_3info1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="bOChJSNXtC9g" colab_type="text"
# # Introduction to Python
# + [markdown] id="OLIxEDq6VhvZ" colab_type="text"
# <img src="https://raw.githubusercontent.com/GokuMohandas/practicalAI/master/images/logo.png" width=150>
#
# In this lesson we will learn the basics of the Python programming language (version 3). We won't learn everything about Python but enough to do some basic machine learning.
#
# <img src="https://raw.githubusercontent.com/GokuMohandas/practicalAI/master/images/python.png" width=350>
#
#
#
# + [markdown] id="VoMq0eFRvugb" colab_type="text"
# # Variables
# + [markdown] id="qWro5T5qTJJL" colab_type="text"
# Variables are objects in python that can hold anything with numbers or text. Let's look at how to make some variables.
# + id="0-dXQiLlTIgz" colab_type="code" outputId="38d1f8a5-b067-416b-b042-38a373624a8b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Numerical example
x = 5
print (x)
# + id="5Ym0owFxTkjo" colab_type="code" outputId="72c2781a-4435-4c21-b15a-4c070d47bd86" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Text example
x = "hello"
print (x)
# + id="1a4ZhMV1T1-0" colab_type="code" outputId="0817e041-5f79-46d8-84cc-ee4aaea0eba2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Variables can be used with each other
a = 1
b = 2
c = a + b
print (c)
# + [markdown] id="nbKV4aTdUC1_" colab_type="text"
# Variables can come in lots of different types. Even within numerical variables, you can have integers (int), floats (float), etc. All text based variables are of type string (str). We can see what type a variable is by printing its type.
# + id="c3NJmfO4Uc6V" colab_type="code" outputId="04b91fa4-51af-48f4-e9ac-591b5bf3e714" colab={"base_uri": "https://localhost:8080/", "height": 153}
# int variable
x = 5
print (x)
print (type(x))
# float variable
x = 5.0
print (x)
print (type(x))
# text variable
x = "5"
print (x)
print (type(x))
# boolean variable
x = True
print (x)
print (type(x))
# + [markdown] id="6HPtavfdU8Ut" colab_type="text"
# It's good practice to know what types your variables are. When you want to use numerical operations on then, they need to be compatible.
# + id="8pr1-i7IVD-h" colab_type="code" outputId="c2bce48d-b69f-4aab-95c1-9e588f67a6c3" colab={"base_uri": "https://localhost:8080/", "height": 51}
# int variables
a = 5
b = 3
print (a + b)
# string variables
a = "5"
b = "3"
print (a + b)
# + [markdown] id="q4R_UF6PVw4V" colab_type="text"
# # Lists
# + [markdown] id="LvGsQBj4VjMl" colab_type="text"
# Lists are objects in python that can hold an ordered sequence of numbers **and** text.
# + id="9iPESkq9VvlX" colab_type="code" outputId="67dfbe9f-d4cb-4a62-a812-7c5c8a01c2fa" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Making a list
list_x = [3, "hello", 1]
print (list_x)
# + [markdown] id="0xC6WvuwbGDg" colab_type="text"
#
# + id="7lbajc-zV515" colab_type="code" outputId="4345bbe0-0f0c-4f84-bcf2-a76130899f34" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Adding to a list
list_x.append(7)
print (list_x)
# + id="W0xpIryJWCN9" colab_type="code" outputId="a7676615-aff1-402f-d41f-81d004728f94" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Accessing items at specific location in a list
print ("list_x[0]: ", list_x[0])
print ("list_x[1]: ", list_x[1])
print ("list_x[2]: ", list_x[2])
print ("list_x[-1]: ", list_x[-1]) # the last item
print ("list_x[-2]: ", list_x[-2]) # the second to last item
# + id="VSu_HNrnc1WK" colab_type="code" outputId="3c40cce2-9599-41aa-b01c-7c6f39329212" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Slicing
print ("list_x[:]: ", list_x[:])
print ("list_x[2:]: ", list_x[2:])
print ("list_x[1:3]: ", list_x[1:3])
print ("list_x[:-1]: ", list_x[:-1])
# + id="dImY-hVzWxB4" colab_type="code" outputId="8394f232-aa11-4dbd-8580-70adb5adc807" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Length of a list
len(list_x)
# + id="3-reXDniW_sm" colab_type="code" outputId="382d1a40-ad1a-49f7-f70f-2c2a02ffd88d" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Replacing items in a list
list_x[1] = "hi"
print (list_x)
# + id="X8T5I3bjXJ0S" colab_type="code" outputId="1ede1c5c-c6ea-452f-b13d-ff9efd3d53b0" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Combining lists
list_y = [2.4, "world"]
list_z = list_x + list_y
print (list_z)
# + [markdown] id="ddpIO6LLVzh0" colab_type="text"
# # Tuples
# + [markdown] id="CAZblq7oXY3s" colab_type="text"
# Tuples are also objects in python that can hold data but you cannot replace values (for this reason, tuples are called immutable, whereas lists are known as mutable).
# + id="G95lu8xWXY90" colab_type="code" outputId="c23250e5-534a-48e6-ed52-f034859f73c2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Creating a tuple
tuple_x = (3.0, "hello")
print (tuple_x)
# + id="kq23Bej1acAP" colab_type="code" outputId="34edfbff-dbc0-4385-a118-7f1bcc49e84f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Adding values to a tuple
tuple_x = tuple_x + (5.6,)
print (tuple_x)
# + id="vyTmOc6BXkge" colab_type="code" outputId="dadeac9a-4bb4-43a3-ff40-e8ca6a05ba2c" colab={"base_uri": "https://localhost:8080/", "height": 164}
# Trying to change a tuples value (you can't)
tuple_x[1] = "world"
# + [markdown] id="UdlJHkwZV3Mz" colab_type="text"
# # Dictionaries
# + [markdown] id="azp3AoxYXS26" colab_type="text"
# Dictionaries are python objects that hold key-value pairs. In the example dictionary below, the keys are the "name" and "eye_color" variables. They each have a value associated with them. A dictionary cannot have two of the same keys.
# + id="pXhNLbzpXXSk" colab_type="code" outputId="e4bb80e5-4e7b-4cbb-daa6-77490ab25145" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Creating a dictionary
goku = {"name": "Goku",
"eye_color": "brown"}
print (goku)
print (goku["name"])
print (goku["eye_color"])
# + id="1HXtX8vQYjXa" colab_type="code" outputId="ad8d1a0f-d134-4c87-99c1-0f77140f2de0" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Changing the value for a key
goku["eye_color"] = "green"
print (goku)
# + id="qn33iB0MY5dT" colab_type="code" outputId="bd89033e-e307-4739-8c1d-f957c32385b5" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Adding new key-value pairs
goku["age"] = 24
print (goku)
# + id="g9EYmzMKa9YV" colab_type="code" outputId="4b9218b9-2f4d-4287-932a-caba430713aa" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Length of a dictionary
print (len(goku))
# + [markdown] id="B-DInx_Xo2vJ" colab_type="text"
# # If statements
# + [markdown] id="ZG_ICGRGo4tY" colab_type="text"
# You can use if statements to conditionally do something.
# + id="uob9lQuKo4Pg" colab_type="code" outputId="21d40476-ea6a-4149-f744-0119d0894d77" colab={"base_uri": "https://localhost:8080/", "height": 34}
# If statement
# Classify x into one of three buckets; exactly one branch assigns `score`.
x = 4
if x < 1:
    score = "low"
elif x <= 4:
    score = "medium"
else:
    score = "high"
print (score)
# + id="vwsQaZqIpfJ3" colab_type="code" outputId="1f190875-b910-4e54-a58a-d4230b7c8169" colab={"base_uri": "https://localhost:8080/", "height": 34}
# If statement with a boolean
# A bare boolean can be used directly as the condition.
x = True
if x:
    print ("it worked")
# + [markdown] id="sJ7NPGEKV6Ik" colab_type="text"
# # Loops
# + [markdown] id="YRVxhVCkn0vc" colab_type="text"
# You can use for or while loops in python to do something repeatedly until a condition is met.
# + id="OB5PtyqAn8mj" colab_type="code" outputId="b4595670-99d4-473e-b299-bf8cf47f1d81" colab={"base_uri": "https://localhost:8080/", "height": 68}
# For loop
# range(3) yields i = 0, 1, 2; x is incremented once per iteration.
x = 1
for i in range(3): # goes from i=0 to i=2
    x += 1 # same as x = x + 1
    print ("i={0}, x={1}".format(i, x)) # printing with multiple variables
# + id="6XyhCrFeoGj4" colab_type="code" outputId="2427ae1f-85f7-4888-f47f-8de1992a84c3" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Loop through items in a list
# Equivalent to the range-based loop above, but iterating an explicit list.
x = 1
for i in [0, 1, 2]:
    x += 1
    print ("i={0}, x={1}".format(i, x))
# + id="5Tf2x4okp3fH" colab_type="code" outputId="1ac41665-2f35-4c7d-e9f5-22614d3ba35c" colab={"base_uri": "https://localhost:8080/", "height": 68}
# While loop
# Counts x down from 3, printing 2, 1, 0.
x = 3
while x > 0:
    x -= 1 # same as x = x - 1
    print (x)
# + [markdown] id="gJw-EDO9WBL_" colab_type="text"
# # Functions
# + [markdown] id="hDIOUdWCqBwa" colab_type="text"
# Functions are a way to modularize reusable pieces of code.
# + id="iin1ZXmMqA0y" colab_type="code" outputId="3bfae4a7-482b-4d43-8350-f8bb5e8a35ac" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Create a function
def add_two(x):
    """Return *x* increased by two."""
    return x + 2


# Call the function on an initial score of zero
score = 0
score = add_two(x=score)
print(score)
# + id="DC6x3DMrqlE3" colab_type="code" outputId="8965bfab-3e20-41ae-9fc1-f22a7d4f3333" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Function with multiple inputs
def join_name(first_name, last_name):
    """Combine a first and a last name with a single space between them."""
    return " ".join([first_name, last_name])


# Build a full name from its two parts
first_name = "Goku"
last_name = "Mohandas"
joined_name = join_name(first_name=first_name, last_name=last_name)
print(joined_name)
# + [markdown] id="lBLa1n54WEd2" colab_type="text"
# # Classes
# + [markdown] id="mGua8QnArAZh" colab_type="text"
# Classes are a fundamental piece of object oriented Python programming.
# + id="DXmPwI1frAAd" colab_type="code" colab={}
# Create the class
class Pets(object):
    """A pet described by its species, color, and (renameable) name."""

    def __init__(self, species, color, name):
        self.species = species
        self.color = color
        self.name = name

    def __str__(self):
        # Shown when the instance is printed, e.g. "orange dog named Guiness."
        return f"{self.color} {self.species} named {self.name}."

    def change_name(self, new_name):
        """Rename the pet."""
        self.name = new_name
# + id="ezQq_Fhhrqrv" colab_type="code" outputId="bf159745-99b1-4e33-af4d-f63924a1fe74" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Making an instance of a class; print(my_dog) invokes Pets.__str__
my_dog = Pets(species="dog", color="orange", name="Guiness",)
print (my_dog)
print (my_dog.name)
# + id="qTinlRj1szc5" colab_type="code" outputId="80939a31-0242-4465-95ff-da0e5caaa67c" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Using a class's function (method) to mutate instance state
my_dog.change_name(new_name="Charlie")
print (my_dog)
print (my_dog.name)
# + [markdown] id="kiWtd0aJtNtY" colab_type="text"
# # Additional resources
# + [markdown] id="cfLF4ktmtSC3" colab_type="text"
# This was a very quick look at python and we'll be learning more in future lessons. If you want to learn more right now before diving into machine learning, check out this free course: [Free Python Course](https://www.codecademy.com/learn/learn-python)
| notebooks/01_Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the UCI Wine dataset; the raw file has no header row
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data',header = None)
# Assign the documented column names: the class label followed by 13 chemical features
df_wine.columns = ['Class label', 'Alcohol',
                   'Malic acid', 'Ash',
                   'Alcalinity of ash', 'Magnesium',
                   'Total phenols', 'Flavanoids',
                   'Nonflavanoid phenols',
                   'Proanthocyanins',
                   'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines',
                   'Proline']
from sklearn.model_selection import train_test_split

# Features are every column after the class label; the target is the class label
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# Stratified split preserves the class proportions in both subsets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=0)
from sklearn.preprocessing import StandardScaler

std = StandardScaler()
# BUG FIX: the scaler must be fit on the training data only and then applied
# unchanged to the test data. Calling fit_transform on the test set leaks test
# statistics and scales the two sets inconsistently.
X_train_std = std.fit_transform(X_train)
X_test_std = std.transform(X_test)
np.set_printoptions(precision=4)
# Per-class mean vectors (classes are labeled 1..3)
mean_vec = []
for label in range(1,4):
    mean_vec.append(np.mean(X_train_std[y_train == label], axis = 0))
    print('MV %s: %s\n' %(label, mean_vec[label-1]))
mean_vec
# Calculating within-class scatter matrices
d = 13 # number of features
S_w = np.zeros((d,d))
for label, mv in zip(range(1,4), mean_vec):
    class_scatter = np.zeros((d,d))
    for row in X_train_std[y_train == label]:
        # reshape to column vectors so the outer product is d x d
        row, mv = row.reshape(d,1), mv.reshape(d,1)
        class_scatter += (row - mv).dot((row - mv).T)
    S_w+=class_scatter
print("Whithin Class scatter matrix - %sx%s" %(S_w.shape[0], S_w.shape[1]))
# We are assuming that the class labels are uniformly distributed, but that condition is violated
print("Class Label Distribution - ", np.bincount(y_train)[1:])
# So we need a scaled version of the scatter matrix: summing per-class
# covariance matrices normalizes each class's scatter by its sample count
d = 13 # number of features
S_w = np.zeros((d,d))
for label, mv in zip(range(1,4), mean_vec):
    class_scatter = np.cov(X_train_std[y_train == label].T)
    S_w+=class_scatter
print("Whithin Class scatter matrix - %sx%s" %(S_w.shape[0], S_w.shape[1]))
# Next we compute the between-class scatter matrix
# (the original comment said "within class" — this is S_b, not S_w)
mean_overall = np.mean(X_train_std, axis = 0)
d = 13  # number of features
S_b = np.zeros((d,d))
for i, m_v in enumerate(mean_vec):
    # BUG FIX: n is the number of SAMPLES in class i+1, i.e. shape[0];
    # shape[1] was the feature count (13) and weighted every class equally.
    n = X_train_std[y_train == i+1].shape[0]
    m_v = m_v.reshape(d,1)
    mean_overall = mean_overall.reshape(d,1)
    S_b += n*(m_v - mean_overall).dot((m_v - mean_overall).T)
print("Between Class scatter matrix - %sx%s" %(S_b.shape[0], S_b.shape[1]))
# Computing eigenpairs of S_w^-1 S_b; their eigenvectors are the linear discriminants
eVal, eVec = np.linalg.eig(np.linalg.inv(S_w).dot(S_b))
# Pair each eigenvalue magnitude with its eigenvector (column of eVec)
ePairs = [(np.abs(eVal[i]), eVec[:,i]) for i in range(len(eVal))]
ePairs.sort(key = lambda k:k[0], reverse = True) # sorting the tuples in decreasing order of e-vals
print('Eigenvalues in descending order:\n')
for eV in ePairs:
    print(eV[0])
# Fraction of class-discriminatory information captured by each discriminant
tot = sum(eVal.real)
discr = [(i/tot) for i in sorted(eVal.real, reverse=True)]
cum_discr = np.cumsum(discr)
plt.bar(range(1, 14), discr, alpha=0.5, align='center', label='Individual "discriminability"')
plt.step(range(1, 14), cum_discr, where='mid', label='Cumulative "discriminability"')
plt.ylabel("Dicriminability Ratio")
plt.xlabel("Linear Discriminants")
plt.ylim([-0.1,1.1])
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
# Stack the two leading eigenvectors into the d x 2 projection matrix W
w = np.hstack((ePairs[0][1][:, np.newaxis],
               ePairs[1][1][:, np.newaxis]))
w
colors = ['r','b','g']
markers = ['s', 'x', 'o']
# Project the training data onto the 2 discriminants (sign flip is cosmetic only)
X_train_lda = (-1)*(X_train_std.dot(w))
for l, c, m in zip(np.unique(y_train), colors, markers):
    plt.scatter(X_train_lda[y_train == l, 0],X_train_lda[y_train == l, 1],c = c, label = l,marker = m)
plt.xlabel('ld 1')
plt.ylabel('ld 2')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
from matplotlib.colors import ListedColormap
def plotDecisionRegion(X, y, classifier, test_idx = None, resolution = 0.02):
    """Plot the 2-D decision surface of *classifier* plus the samples in X/y.

    X is expected to have exactly two feature columns; test_idx optionally
    marks a slice of rows to highlight as the test set.
    """
    markers = ('s','x','o','^','v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Grid bounds padded by 1 unit on each side
    x1_min, x1_max = X[:, 0].min() - 1,X[:,0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1,X[:,1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),
                          np.arange(x2_min,x2_max, resolution))
    #xx1, xx2 are the coordinates of x and y respectively, we pair each value of the two corresponding matrices and get a grid
    # Predict every grid point, then reshape back to the grid for contouring
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1,xx2, Z, alpha = 0.3, cmap = cmap)
    plt.xlim(xx1.min(),xx1.max())
    plt.ylim(xx2.min(),xx2.max())
    # Overlay the actual samples, one color/marker per class
    for idx, c1 in enumerate(np.unique(y)):
        plt.scatter(x =X[y==c1,0],
                    y = X[y==c1,1],
                    alpha =0.8,
                    c = colors[idx],
                    marker = markers[idx],
                    label = c1, edgecolor='black')
    # Highlight the test samples with large hollow circles
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        # NOTE(review): c='' is rejected by newer matplotlib — confirm version or use facecolors='none'
        plt.scatter(X_test[:,0], X_test[:,1], c= '', edgecolors='black',
                   alpha=1.0, linewidths=1, marker='o', s=100, label='test set')
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression

# Fit LDA on the training data and project it to the 2 leading discriminants
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
lr = LogisticRegression(multi_class='ovr', random_state=1, solver='lbfgs')
lr.fit(X_train_lda, y_train)
plotDecisionRegion(X_train_lda, y_train, classifier=lr)
plt.xlabel('ld 1')
plt.ylabel('ld 2')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
# BUG FIX: the test set must be projected with the transformation learned on
# the training data. Re-fitting on the test set (fit_transform) produced a
# different projection, so the classifier trained above did not apply to it.
X_test_lda = lda.transform(X_test_std)
plotDecisionRegion(X_test_lda, y_test, classifier=lr)
plt.xlabel('ld 1')
plt.ylabel('ld 2')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
| DataCompressionAndDimentionalityReduction/.ipynb_checkpoints/Linear Discriminant Analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # Plot a Spherical Harmonics
#
# Useful reference: https://balbuceosastropy.blogspot.co.uk/2015/06/spherical-harmonics-in-python.html
# #### Setup Environment
# Setup ipython environment
# %load_ext autoreload
# %autoreload 2
# # %matplotlib auto
# %matplotlib inline
# Import useful things
#
# from os import system
# system('source ~/.virtual_enviroments/ringdownFD_nikhef/bin/activate')
from nrutils.core.basics import lalphenom
from kerr import rgb,gmvpfit,modelrd,anglep,qnmfit,leaver,ndflatten,sunwrap,alert,cyan,slm
from kerr.models import mmrdns as ns
from nrutils import scsearch,gwylm,FinalSpin0815,EradRational0815
# Setup plotting backend and global matplotlib defaults for the figures below
import matplotlib as mpl
from mpl_toolkits.mplot3d import axes3d
mpl.rcParams['lines.linewidth'] = 0.8
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
from matplotlib.pyplot import *
from mpl_toolkits.mplot3d import Axes3D
#
from matplotlib import cm, colors
#
import corner
#
from numpy import *
from numpy.linalg import pinv
#
from nrutils.core.units import *
#
from scipy.optimize import minimize
from os.path import expanduser
# #### Calculate Harmonic over Sky
# +
# Angular grids: theta in [0, pi], phi in [-pi, pi]
n = 260
th_ = linspace(0,pi,n)
ph_ = linspace(-pi,pi,n+1)
#
TH,PH = meshgrid(th_,ph_)
# Spin weight and mode numbers for the harmonic
s = -2
l,m = 2,2
# Superpose the (l, m) and (l, -m) spin-weighted spherical harmonics
# NOTE(review): sYlm is not imported in the visible setup cell — presumably
# provided by one of the star imports or the kerr package; confirm it resolves
SYLM = sYlm(s,l,m,th_,ph_)
SYLM += sYlm(s,l,-m,th_,ph_)
# +
fig = figure(figsize=5*figaspect(1.2))
ax = fig.add_subplot(111, projection='3d')
# Use the harmonic's magnitude as the radial coordinate
R = abs(SYLM)
# R = SYLM.real
# The Cartesian coordinates of the unit sphere
X = R * sin(TH) * cos(PH)
Y = R * sin(TH) * sin(PH)
Z = R * cos(TH)
#
norm = colors.Normalize()
#
# N = (R - R.min())/(R.max() - R.min())
N = R/R.max()
# N = (SYLM - SYLM.min())/(SYLM.max() - SYLM.min())
# N = N.real
# Color the surface by the (normalized) magnitude
im = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=cm.jet( norm(R) ))
#
m = cm.ScalarMappable(cmap=cm.jet)
m.set_array(R)    # Assign the unnormalized data array to the mappable
                  # so that the scale corresponds to the values of R
fig.colorbar(m, shrink=0.33)
axis('equal');
axis('square');
# Plot the surface.
# surf = ax.plot_surface(X, Y, Z,rstride=1, cstride=1, facecolors=cm.jet(fcolors), alpha=0.8 )
# surf = ax.plot_surface(X, Y, Z,rstride=1, cstride=1, facecolors=cm.jet(fcolors) )
# -
# Mollweide projection of the (2, -2) harmonic
# NOTE(review): sYlm_mollweide_plot is not imported in the visible setup — confirm its source
l,m = 2,-2
fig = figure( figsize= 2*figaspect(.68) )
ax = subplot(111,projection='mollweide')
sYlm_mollweide_plot(l,m,ax,N=200,form='r')
| factory/.ipynb_checkpoints/plot_spherical_harmonic-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Ejercicio Ridge, Lasso y ElasticNet
# Para este ejercicio vas a trabajar con datos de ventas de una empresa que fabrica muebles en España. Esta empresa es una Pyme que demanda conocer de antemano qué ingresos va a tener cada mes, ya que necesita pedir un anticipio a su entidad financiera de cara a poder abastecerse de materia prima al comenzar el mes. Como desconoce cuánto tiene que pedir, tira al alza y acaba pagando muchos intereses. El objetivo es reducir estos gastos.
#
# En las bases de datos de la empresa constan todos los gastos en publicidad y ventas, para cada uno de los meses desde su fundación (hace más de 15 años).
#
# Dado que los presupuestos de marketing se cierran al principio de cada mes, la empresa necesita un modelo predictivo que le anticipe las ventas que conseguirá a final de mes en función de los gastos que realizará en marketing.
#
# Para ello tendrás que utilizar tanto modelos de regresión normales, como regularizados.
#
# 1. Carga los datos y realiza un pequeño análisis exploratorio. Mira a ver cómo se relaciona las ventas con el resto de variables.
# 2. Crea varios modelos y modifica los hiperparámetros necesarios para mejorar el performance del modelo.
# 3. Interpreta los resultados. ¿Cuánto cambian las ventas si aumentamos la inversión en radio un punto más? ¿Y si aumentamos la inversión en TV o periódicos?
# ## Import libraries
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# ## Exploratory Data Analysis
# +
# Load the advertising data; the first CSV column is the row index
path = 'data/Advertising.csv'
data = pd.read_csv(path, index_col=0)
data.head()
# -
# Quick look at the sample size and the available columns
len(data)
data.columns
def scatter_plot(feature, target):
    """Scatter the ad spend for *feature* (x-axis) against *target* (y-axis).

    Reads the module-level ``data`` DataFrame loaded above.
    """
    plt.figure(figsize=(16, 8))
    xs, ys = data[feature], data[target]
    plt.scatter(xs, ys, c='black')
    plt.xlabel("Money spent on {} ads ($)".format(feature))
    plt.ylabel("Sales")
    plt.show()
# Visual relationship between each channel's spend and sales
scatter_plot('TV', 'sales')
scatter_plot('newspaper', 'sales')
# Pairwise correlations; TV spend correlates most strongly with sales
data.corr()
import seaborn as sns
sns.displot(data['sales'])
# ## Modelado
# +
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# BUG FIX: the original assignment was left incomplete (syntax error).
# Split features (per-channel ad spend) and target (sales) into train/test sets.
X_train, X_test, y_train, y_test = train_test_split(
    data.drop('sales', axis=1), data['sales'], test_size=0.20
)
# -
# ## Interpretación de los resultados
| 4-Machine_Learning/1-Supervisado/3-Regularization/ejercicios/Notas_Advertising Ridge Lasso.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image
import random
import os
import sys  # BUG FIX: sys is used below but was never imported (NameError)
import time
import numpy as np
import cv2
# Move the croprows-cli sources onto the import path (parent folder)
sys.path.insert(0, '../croprows-cli/src/')
# Allow very large orthomosaic images past PIL's decompression-bomb guard
Image.MAX_IMAGE_PIXELS = 1000000000
# +
#cwd = os.getcwd()
#print(cwd)
# -
def secuentalCropper(cwd, imgFile, tileSizeI):
    """Cut *imgFile* into square tiles of *tileSizeI* pixels.

    Tiles are written as JPEGs under ``cwd/cropped_images/<name>/``, one file
    per (column, row) pair. Any remainder of the image smaller than a full
    tile at the right/bottom edge is discarded.
    """
    pic = Image.open(imgFile)
    imageWidth, imageHeight = pic.size
    tile_size = int(tileSizeI)
    # start timing the script
    start_time = time.time()
    nCols = int(imageWidth / tile_size)
    nRows = int(imageHeight / tile_size)
    nTiles = nCols * nRows
    # BUG FIX: range(1, nCols) skipped the last column and row, so only
    # (nCols-1)*(nRows-1) tiles were produced despite announcing nTiles.
    cols = range(1, nCols + 1)
    rows = range(1, nRows + 1)
    print(cols)
    imagePath = cwd+'/cropped_images/'
    # create the output directories if necessary (replaces the stat/bare-except dance)
    os.makedirs(imagePath, exist_ok=True)
    # image filename
    picName = 'pic'
    fileExt = '.jpg'
    # make folder with image name for organization
    dir_name = imagePath+picName+"_"+str(nCols)+"_"+str(nRows)+"_"+str(tile_size)+"px"+"_"+str(nTiles)+"tiles"
    print(dir_name)
    os.makedirs(dir_name, exist_ok=True)
    print("Generating " + str(nTiles) + " tiles, in directory " + imagePath)
    for i in cols:
        for j in rows:
            # tile (i, j) covers pixels [(i-1)*tile, i*tile) x [(j-1)*tile, j*tile)
            x2 = i * tile_size
            y2 = j * tile_size
            x1 = x2 - tile_size
            y1 = y2 - tile_size
            # create a box to crop with
            box = (x1, y1, x2, y2)
            # create and save the cropped region
            region = pic.crop(box)
            finalFilename = dir_name+"/"+picName+"-col_"+str(i)+"-row_"+str(j)+fileExt
            print(finalFilename)
            region.save(finalFilename)
            # print confirmation message
            print("File: " + finalFilename + " successfully saved!")
    elapsed_time = time.time() - start_time
    # print total image generation time
    print("Image generation total processing time: "+str(elapsed_time) + " seconds")

secuentalCropper("../orthomosaics/tmp", "../orthomosaics/testimage2.tif",400)
| test/notebooks/Unit_Preprocess_ImageSecuentialCropper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <title>
# Microbial Genomics Group Rotation Report |
# <NAME> |
# MCB PhD school
# </title>
# # Scope
# How to make sense of microbial sequence data?
#
# # Topics
# ## Next Generation Sequencing (Illumina vs Nanopore)
# Understanding of differences between outputs of two most common sequencing platforms
#
# ### Illumina
# Great quality of reads, running on short reads
#
# ### Nanopore
# Poor to decent quality of reads, running on long reads
# <img src="https://i.imgur.com/FD6stLs.png"
# alt="Illumina vs Nanopore"
# style="float: left; margin-right: 10px;" />
# ## Quality Control
# ### Illumina
# Base pairs are read based on the fluorescence signal
# ### Nanopore
# Base pairs are read based on basecalling on raw electric current signal
# ### Why do we need QC
# Uncertainty in the ability to read the signal is reflected in the QC score
# ### FastQ format
# As presented in [online materials](https://en.wikipedia.org/wiki/FASTQ_format), every Illumina read has its certainty score encoded in ASCII characters for better control of the quality of the results. Those will be explored in the exercises presented below.
# ### Exercise 1
# Open <code>assembly-data/part1_qc/data/short_reads_1.fastq</code> using a text editor (or "head -4" command)
#
# Open [wiki](https://en.wikipedia.org/wiki/FASTQ_format)
#
# Investigate the first short-read
#
# Answer the following questions:
#
# <code>
# A. Which ASCII character corresponds to the worst Phred score for Illumina 1.8+?
# B. What is the Phred quality score of the 3rd nucleotide of the 1st sequence?
# C. What is the accuracy of this 3rd nucleotide?
# </code>
# + language="bash"
# head -4 assembly-data/part1_qc/data/short_reads_1.fastq
# -
# ### Answer 1
# <code>
# A. "!" corresponds to the worst Phred score for Phred-33 (Illumina 1.8+)
# B. Phred score of 3rd nucleotide of 1st sequence is 40
# C. Accuracy is above 99.99%, probability of error is equal to 0.001
# </code>
# ### Exercise 2
# Open <code>assembly-data/part1-qc/data/short_reads_1.fastq</code> in FastQC
#
# Answer the following questions:
#
# <code>
# A. Which Phred encoding is used in the FASTQ file for these sequences?
# B. How is the mean per-base score changing along the sequence?
# C. Is this tendency seen in all sequences?
# D. Why is there a warning for the per-base sequence content graphs?
# E. Why is there a warning for the per sequence GC content graphs?
# </code>
# + language="bash"
# fastqc assembly-data/part1_qc/data/short_reads_1.fastq
# -
# ### Answers 2
# <code>
# A. Sanger / Illumina 1.9 encoding
# B. Most of sequences have almost 99% good quality of per read (Mean Sequence Quality is equal to 39). But longer the read, the quality drops.
# C. Yes. On "Per base sequence quality" plot, we can observe that the mean quality drops are fitting into IQR
# D. The "Per base sequence content" plot shows that between 13th and 33rd position, reads have very little information entropy. But from 1st to 13th position entropy is huge.
# E. the GC content is higher than expected in most of the sequences. Distribution of GC is also negatively skewed
# </code>
# ### Exercise 3
# Open <code>assembly-data/part1_qc/data/short_reads_2.fastq </code> in FastQC
#
# Repeat the analysis and answer the following questions:
#
# <code>
# A. What do you make of the quality of the data?
# B. What can we do about it?
# </code>
# + language="bash"
# fastqc assembly-data/part1_qc/data/short_reads_2.fastq
# -
# ### Answers 3
#
# <code>
# A. The quality of data is not as good as in previous example. Some reads fall into the less than 50% quality score
# B. Preprocessing techniques: dropping sequences with ambiguous nucleotides, trimming, masking, cutting of 5' and 3' ends, removal of adapters.
# </code>
# ### Exercise 4
# Open <code>assembly-data/part1_qc/fail-examples/31_S7_L001_R2_001_fastqc.html</code> and <code>assembly-data/part1_qc/fail-examples/50_S26_L001_R1_001_fastqc.html</code>
#
# What can you say about the quality of these data? Can you improve it?
# ### Answers 4
# Quality of both examples is one step from being terrible.
# There are no adapters distinguished and the entropy of information is huge. In my opinion reads are too long for illumina and the quality drops are too significant to ignore them. I think the best solution would be to do sequencing once again.
# ## Assembly
#
# ### de Bruijn graph
# - Named after a Dutch mathematician, <NAME>
# - A directed graph of sequences of symbols
# - Nodes in the graph are k-mers (oligonucleotides of length <i>k</i>)
# - Edges represent consecutive k-mers (which overlap by <i>k</i>-1 symbols)
# <img src="https://i.imgur.com/SdXAgfP.png"
# alt="de Bruijn graph"
# style="float: left; margin-right: 2px;" />
# ### Assembly software
# #### Tools for short read assembly
# - SPAdes
# - MEGAHIT
# - Velvet
# - SKESA
# - IDBA
# - ABySS
#
# #### Long read assembly
# - Canu
# - Wtdbg2
# - Flye
# - Ra
# - Miniasm
# - Shasta
# - [Comparison of tools](https://github.com/rrwick/Long-read-assembler-comparison)
#
# #### State of the art pipeline for hybrid assembly
# [Unicycler](https://github.com/rrwick/Unicycler)
#
# #### Bandage as a tool for viewing assembly graphs
# [Bandage](http://rrwick.github.io/Bandage/)
#
# + language="bash"
# Bandage
# -
# <img src="https://i.imgur.com/irgruKB.png"
# alt="Example of Bandage"
# style="float: left; margin-right: 2px;" />
| Microbial Genomics Group Report Michal Kowalski.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MusicGen
# language: python
# name: musicgen
# ---
# This notebook filters documents by using an SVM
#
# The environment that was used it printed below
# Record the Python version this notebook was run with
print(__import__('sys').version)
# !conda list -n NLP37
# +
# %pylab qt
from sklearn.svm import SVC
from sklearn.calibration import CalibratedClassifierCV
from toolz import curry, compose
import os
# Prefer the faster C pickle on Python 2; fall back to the stdlib module
# NOTE(review): bare except — on Python 3 this catches the expected ImportError,
# but "except ImportError" would be the precise form
try:
    import cPickle as pickle
except:
    import pickle
# +
def save_pickle(filename, data):
    """Serialize *data* to *filename* with pickle (path is OS-normalized)."""
    path = os.path.normpath(filename)
    with open(path, 'wb') as handle:
        pickle.dump(data, handle)


def load_pickle(filename, encoding='utf-8'):
    """Load and return the pickled object stored at *filename*.

    *encoding* is forwarded to pickle.load (needed to read Python 2 pickles).
    """
    path = os.path.normpath(filename)
    with open(path, 'rb') as handle:
        return pickle.load(handle, encoding=encoding)
# -
# Ensure the input filename matches your output filename of the Doc2Vec notebook
#
# Check input and output names are correct
INPUT_FILENAME = r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\05 Filter Docs\temp\vectorized.pkl'
OUTPUT_FILENAME = r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\05 Filter Docs\temp\filtered.pkl'
CORPUS = r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\__Data__\03 Preprocessing\out\pre_processed.pkl'
SVM = 'Classifiers\svm_wiki.pkl'
# Because the previous notebook uses Python 2.7 we must decode the pickle files by using 'latin1' encoding - this just so happens to be the best one that works in Python
# Decode for Python 3: round-trip the file so it is re-pickled in Python 3 format
save_pickle(INPUT_FILENAME, load_pickle(INPUT_FILENAME, encoding='latin1'))
# The next 2 cells will filter the documents.
# compose() applies bottom-up: load vectors -> SVM predict -> keep corpus rows
# where the prediction is truthy -> save to OUTPUT_FILENAME
# NOTE(review): `asarray` presumably comes from the %pylab namespace above — confirm
svm_filter_pipe = compose(
    curry(save_pickle)(OUTPUT_FILENAME),
    lambda x: asarray(load_pickle(CORPUS))[x.astype(bool)],
    load_pickle(SVM).predict,
    load_pickle,
)
svm_filter_pipe(INPUT_FILENAME)
# Peek at the filtered output
load_pickle(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\05 Filter Docs\temp\filtered.pkl')
| 05 Filter Docs/SVM Filter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:KubeflowEnv]
# language: python
# name: conda-env-KubeflowEnv-py
# ---
# # Develop Model
# In this noteook, we will go through the steps to load the pre-trained InceptionV3 model, pre-process the images to the required format and call the model to find the top predictions.
# +
import os
import numpy as np
import tensorflow as tf
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.inception_v3 import preprocess_input
from keras import backend as K
from keras.models import Model
from PIL import Image
from numba import cuda
print(tf.__version__)
# -
# We first load the model in test mode.
K.set_learning_phase(0) # (0=test, 1=train)
# %%time
model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
# Here is the summary of the model.
model.summary()
# Next, we serialize the model and get its weights to re-build the model with learning phase hard coded to 0.
config = model.get_config()
weights = model.get_weights()
del model
new_model = Model.from_config(config)
new_model.set_weights(weights)
# Let's test our model with an image of a Lynx.
# !wget https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg
img_path = '220px-Lynx_lynx_poing.jpg'
print(Image.open(img_path).size)
Image.open(img_path)
# Below, we load the image by resizing to (224, 224) and then preprocessing using the methods from keras preprocessing.
img = image.load_img(img_path, target_size=(299, 299))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
# Now, let's call the model on our image to predict the top 3 labels.
# %%time
preds = new_model.predict(img)
print('Predicted:', decode_predictions(preds, top=3))
# The model's top guess is Lynx. We can now move on to exporting the model for TensorFlow serving.
# ## Save Model as a TensorFlow Servable
# TensorFlow Serving requires that the model is saved in [SavedModel](https://www.tensorflow.org/api_docs/python/tf/saved_model) format. We will first create a directory hierarchy which will include a version number for our model and then save it in the required format.
MODEL_DIRECTORY = 'models'
VERSION = '1'
export_path = os.path.join(MODEL_DIRECTORY, VERSION)
print('export_path = {}'.format(export_path))
if os.path.isdir(export_path):
print('Already saved a model, cleaning up.')
# !rm -r {export_path}
# We will now fetch the Keras session and save the model as a servable.
tf.saved_model.simple_save(
K.get_session(),
export_path,
inputs={'input_image': new_model.input},
outputs={t.name:t for t in new_model.outputs})
# Let's clear GPU memory before we move on to the next notebook.
K.clear_session()
del new_model
cuda.select_device(0)
cuda.close()
# Next, we will [serve the exported model with TensorFlow serving image and test locally](01_ServeModelLocally.ipynb).
| 00_DevelopModelServable.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def create_ages(mu=50, sigma=13, num_samples=100, seed=42):
    """Draw *num_samples* ages from N(mu, sigma) rounded to whole years.

    Seeds numpy's global RNG so repeated calls are reproducible.
    """
    np.random.seed(seed)
    ages = np.random.normal(loc=mu, scale=sigma, size=num_samples)
    return np.round(ages, decimals=0)
sample = create_ages()
sample
sns.displot(sample,bins=20)
sns.boxplot(x=sample)
# note that using the IQR methodology we find an outlier
# we index the array by calling the Series method and calling it
ser = pd.Series(data=sample)
ser.describe()
# Interquartile range: 75th minus 25th percentile
IQR = ser.quantile(.75)-ser.quantile(.25)
IQR
1.5*IQR
# Tukey fences: values beyond quartile +/- 1.5*IQR are outliers
lower_limit = ser.quantile(.25) - 1.5*IQR
lower_limit
# with a filter we can get rid of the outliers
ser[(ser > lower_limit) & (ser < ser.quantile(.75) + 1.5*IQR)]
# alternative with tuple unpacking
q25, q75 = np.percentile(a=sample, q=[25,75])
iqr = q75 - q25
pd.set_option('display.max_columns',None)
df = pd.read_csv('../prep-data/Ames_Housing_Data.csv')
# +
df.head()
# the label we're trying to predict is the (final) SalePrice. We're to predict the sale price given all these features.
# -
df.corr()['SalePrice'].sort_values()
sns.scatterplot(data=df, x='Overall Qual',y='SalePrice')
sns.scatterplot(data=df, x='Gr Liv Area',y='SalePrice')
# Inspect the suspicious points: high quality / large area but low price
df[(df['Overall Qual']>8) & (df['SalePrice']<200000)]
df[(df['Gr Liv Area']>4000) & (df['SalePrice']<400000)]
# Drop those rows and re-plot to confirm the outliers are gone
drop_index = df[(df['Gr Liv Area']>4000) & (df['SalePrice']<400000)].index
df = df.drop(drop_index, axis=0)
sns.scatterplot(data=df, x='Overall Qual',y='SalePrice')
sns.scatterplot(data=df, x='Gr Liv Area',y='SalePrice')
| prep-7-feature-engineering/fe1-outliers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit ('Python38')
# name: python3
# ---
# # Counting Sort
#
# Explanation of counting sort functionality and demonstration of their lineal magnitude order.
#
# ---
# 
#
#
#
# ## Departamento de Ingeniería de Sistemas
# ### Estructuras de Datos
# #### 2021-1
#
# ---
#
# > *<NAME>*
# ## Developer
#
# **<NAME>** computer scientist, engineer and inventor.
#
# **Seward** developed the *radix sort* and *counting sort* algorithms in 1954 at MIT.
#
# 
#
# Born: *July 24, 1930*
#
# Died: *June 19, 2012* (aged 81)
#
# He also worked on the *Whirlwind Computer* (it was among the first digital electronic computers that operated in real-time for output) and developed instruments that powered the guidance systems for the Apollo spacecraft and Polaris missile.
#
#
#
# ## Introducing Counting Sort
#
# ### What is counting sort?
#
# It is a sorting algorithm where all the input in the input $array$ are positive integers.
#
# Counting sort assumes that each element in the input $array$ has a value ranging from $minValue$ to $maxValue$.
#
# - $array$
# - $minValue$
# - $maxValue$
#
#
# +
import numpy.random as rdm
# create one random input array of 7 values in [0, 190)
arr = list(rdm.randint(190,size = 7))
# get the minimum value in the input array
minValue = min(arr)
# get the maximum value in the input array
maxValue = max(arr)
print('array = '+str(arr))
print('minValue = '+ str(minValue))
print('maxValue = '+ str(maxValue))
# print('---this cell runs 3n times')
# -
# ### Functionality
#
# Counting sort is a special sorting algorithm that runs in linear time
#
# * $O(n+k)$
#
# with respect to two variables:
#
# 1. The length of the input $array$
#
#    - $n = length(array)$
#
# 2. The distance between the $minValue$ and the $maxValue$
#
#    - $k = maxValue - minValue + 1$
# +
# get the length of the input array
n = len(arr)
# get the distance between the minValue and the maxValue
k = maxValue - minValue + 1
print('n = '+str(n))
print('k = '+ str(k))
# print('---this cell runs n+1 times')
# -
# For each element in the $array$, counting sort maps it to an index of the $temporal$ array, increasing the corresponding count by 1.
# - $temporal=[0,0,0, \dots, k]$
# +
# create the temporal (count) array, one slot per possible value
temporal = [0]*k
# fill the temporal array with the occurrences of each value in the input array
for value in arr:
    temporal[value - minValue]+=1
print('temporal = ' +str(temporal))
# print('---this cell runs k+n times')
# print(sum(temporal)==n)
# -
# Finally counting sort uses the $temporal$ array index to place each element directly into the correct slot of the $solution$ array.
#
# - $solution=[0,0,0, \dots, n]$
# +
# create the solution array
solution = []
# emit each index (shifted back by minValue) as many times as it was counted
for i in range(len(temporal)):
    while temporal[i]>0:
        solution.append(i+minValue)
        temporal[i]-=1
print('solution = '+ str(solution))
# print('---this cell runs 1+k+n times')
# -
# I want to graph an array with points in the form $(n+k, O(n+k))$, with $n$ increasing, so I define the next function to perform the counting sort for a variable $n$ and return the value of $n + k$.
#
# ## $O(n+k)$
# ### Demonstration of the magnitude order
#
# First I'll get all the times that each cell was executed and then I'll define the magnitud order function for the code i wrote.
#
# ### Cell 1 iterations $\sum_{i=1}^{n} 1 +\sum_{i=1}^{n} 1 +\sum_{i=1}^{n} 1 = 3n$
# ### Cell 2 iterations $(\sum_{i=1}^{n} 1 )+ 1 = n +1$
# ### Cell 3 iterations $\sum_{i=1}^{k} 1 + \sum_{i=1}^{n} 1 = k+n$
# ### Cell 4 iterations $1 + \sum_{i=1}^{k} 1+ \sum_{i=1}^{n} 1 = 1+k+n$
#
# $O(n+k) = \sum_{i=1}^{n} 1 +\sum_{i=1}^{n} 1 +\sum_{i=1}^{n} 1 + (\sum_{i=1}^{n} 1 )+ 1 + \sum_{i=1}^{k} 1 + (\sum_{i=1}^{n} 1) + 1 + \sum_{i=1}^{k} 1+ \sum_{i=1}^{n} 1 $
#
# $O(n+k) = 3n+n+1+k+n+1+k+n $
#
# $O(n+k) = 6n+2k+2$
def countingSorn (n):
    """Counting-sort a random array of length n (values in [0, n^2)).

    Returns n + k, where k is the value range of the generated array, so the
    caller can relate running time to the algorithm's O(n + k) bound.
    """
    import numpy.random as rdm
    # random input array of length n
    values = list(rdm.randint(n**2, size = n))
    lo = min(values)
    hi = max(values)
    n = len(values)
    # k = size of the value range
    k = hi - lo + 1
    # count the occurrences of each value
    counts = [0] * k
    for v in values:
        counts[v - lo] += 1
    # emit each value in order, as many times as it was counted
    ordered = []
    for idx in range(len(counts)):
        while counts[idx] > 0:
            ordered.append(idx + lo)
            counts[idx] -= 1
    return n + k
# Finally I create 2 arrays:
# - $timer = [O(n_1 + k_1)_1, ..., O(n_i + k_i)_i], i \in [1, n]$ for each time that take the run of the counting sort for each $nk = [(n_1 + k_1), ..., (n_i + k_i)]$
# +
import matplotlib.pyplot as plt
plt.style.use('Solarize_Light2')
import numpy as np
import timeit
# measured execution time of each counting-sort run
timer = []
# n + k value returned by each counting-sort run
nk = []
# linear space of input sizes to map the counting sort and give a continuous grid layer
n_set = np.linspace(1, 500, 400).astype(int)
# time one counting-sort run per input size.
# NOTE: the original used a `%timeit` cell magic that jupytext left commented
# out, so `t` was never defined and `nk` was never filled; timeit.timeit
# reproduces the single-run measurement in plain Python.
for n in n_set:
    t = timeit.timeit(lambda: nk.append(countingSorn(n)), number=1)
    timer.append(t)
# plot log-log so the O(n + k) trend shows as a straight line
plt.xlabel("n + k")
plt.ylabel("t (s)")
plt.plot(np.log(nk), np.log(timer), color="red")
plt.legend([f"n + k [{nk[0]}, {nk[-1]}]"], loc="upper left")
# -
| countingSort.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Flatten, BatchNormalization, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import itertools
import os
import shutil
import random
import glob
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore',category=FutureWarning)
# %matplotlib inline
# List visible GPUs and enable memory growth so TensorFlow does not grab all VRAM.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs Available: ", len(physical_devices))
# Guard against CPU-only machines: indexing physical_devices[0] on an empty
# list raises IndexError and aborts the notebook.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# ## Data preparation
# +
# Move into the raw dogs-vs-cats download and carve out train/valid/test
# splits as class-named subdirectories (layout expected by flow_from_directory).
os.chdir('data/dogs-vs-cats')
if os.path.isdir('train/dog') is False:
    os.makedirs('train/dog')
    os.makedirs('train/cat')
    os.makedirs('valid/dog')
    os.makedirs('valid/cat')
    os.makedirs('test/dog')
    os.makedirs('test/cat')
# NOTE(review): the moves below run even when the split directories already
# exist, so re-running this cell re-samples from whatever images remain in the
# top-level folder — confirm this cell is intended to run only once.
for c in random.sample(glob.glob('cat*'), 500):
    shutil.move(c, 'train/cat')
for c in random.sample(glob.glob('dog*'), 500):
    shutil.move(c, 'train/dog')
for c in random.sample(glob.glob('cat*'), 100):
    shutil.move(c, 'valid/cat')
for c in random.sample(glob.glob('dog*'), 100):
    shutil.move(c, 'valid/dog')
for c in random.sample(glob.glob('cat*'), 50):
    shutil.move(c, 'test/cat')
for c in random.sample(glob.glob('dog*'), 50):
    shutil.move(c, 'test/dog')
# return to the project root
os.chdir('../../')
# -
# Directory roots for the three splits created above.
train_path = 'data/dogs-vs-cats/train'
valid_path = 'data/dogs-vs-cats/valid'
test_path = 'data/dogs-vs-cats/test'
# Batches of 224x224 images preprocessed the same way as VGG16's training data.
train_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
    .flow_from_directory(directory=train_path, target_size=(224,224), classes=['cat','dog'], batch_size=10)
valid_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
    .flow_from_directory(directory=valid_path, target_size=(224,224), classes=['cat','dog'], batch_size=10)
# shuffle=False keeps the test order stable so predictions line up with labels.
test_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
    .flow_from_directory(directory=test_path, target_size=(224,224), classes=['cat','dog'], batch_size=10, shuffle=False)
# Sanity-check the expected split sizes and the two-class setup.
assert train_batches.n == 1000
assert valid_batches.n == 200
assert test_batches.n == 100
assert train_batches.num_classes == valid_batches.num_classes == test_batches.num_classes == 2
# pull one batch of (images, one-hot labels) for visualization below
imgs, labels = next(train_batches)
# Plot the images from train_batches
def plotImages(images_arr):
    """Display up to ten images from *images_arr* side by side in one row."""
    fig, axes = plt.subplots(1, 10, figsize=(20, 20))
    # pair each image with one of the ten axes; extra images are ignored
    for image, axis in zip(images_arr, axes.flatten()):
        axis.imshow(image)
        axis.axis('off')
    plt.tight_layout()
    plt.show()
# + jupyter={"outputs_hidden": true}
plotImages(imgs)
print(labels)
# -
# Simple CNN: two conv+pool stages followed by a 2-way softmax classifier.
model = Sequential([
    Conv2D(filters=32, kernel_size=(3,3), activation='relu', padding='same', input_shape=(224,224,3)),
    MaxPool2D(pool_size=(2,2), strides=2),
    Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same'),
    MaxPool2D(pool_size=(2,2), strides=2),
    Flatten(),
    Dense(units=2, activation='softmax')
])
model.summary()
# categorical_crossentropy matches the one-hot labels produced by the generators
model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x=train_batches, validation_data=valid_batches, epochs=10, verbose=2)
| src/plotter/jupyter/.ipynb_checkpoints/CNN-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="fEScvF6Y3xXC"
# <a href="https://colab.research.google.com/github/timeseriesAI/tsai/blob/master/tutorial_nbs/10_Time_Series_Classification_and_Regression_with_MiniRocket.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="HBJSf2VW30yg"
# created by <NAME> and <NAME> (<EMAIL>) based on:
#
# * <NAME>., <NAME>., & <NAME>. (2020). MINIROCKET: A Very Fast (Almost) Deterministic Transform for Time Series Classification. arXiv preprint arXiv:2012.08791.
#
# * Original paper: https://arxiv.org/abs/2012.08791
#
# * Original code: https://github.com/angus924/minirocket
# + [markdown] heading_collapsed=true id="s4XIjQJqOExR"
# # MiniRocket 🚀
#
# > A Very Fast (Almost) Deterministic Transform for Time Series Classification.
# + [markdown] hidden=true id="rTlLcH2DMusP"
# ROCKET is a type of time series classification and regression methods that is different to the
# ones you may be familiar with. Typical machine learning classifiers will
# optimize the weights of convolutions, fully-connected, and pooling layers,
# learning a configuration of weights that classifies the time series.
#
# In contrast, ROCKET applies a large number of fixed, non-trainable, independent convolutions
# to the timeseries. It then extracts a number of features from each convolution
# output (a form of pooling), generating typically 10000 features per sample. (These
# features are simply floating point numbers.)
#
# The features are stored so that they can be used multiple times.
# It then learns a simple linear head to predict each time series sample from its features.
# Typical PyTorch heads might be based on Linear layers. When the number of training samples is small,
# sklearn's RidgeClassifier is often used.
#
# The convolutions' fixed weights and the pooling method have been chosen experimentally to
# effectively predict a broad range of real-world time series.
#
# The original ROCKET method used a selection of fixed convolutions with weights
# chosen according to a random distribution. Building upon the lessons learned
# from ROCKET, MiniRocket refines the convolutions to a specific pre-defined set
# that proved to be at least as effective as ROCKET's. It is also much faster
# to calculate than the original ROCKET. Actually, the paper authors "suggest that MiniRocket should now be considered and used as the default variant of Rocket."
#
# MiniROCKET was implemented in Python using numba acceleration and mathematical
# speedups specific to the algorithm. It runs quite fast, utilizing CPU cores in
# parallel. Here we present a 2 implementations of MiniRocket:
# * a cpu version with an sklearn-like API (that can be used with small datasets - <10k samples), and
# * a PyTorch implementation of MiniRocket, optimized for
# the GPU. It runs faster (3-25x depending on your GPU) than the CPU version and offers some flexibility for further experimentation.
#
# We'll demonstrate how you can use both of them throughout this notebook.
# + [markdown] heading_collapsed=true id="1XQZaahyQCxf"
# # Import libraries 📚
# + hidden=true id="Dl8qBv19Pj3S"
# ## NOTE: UNCOMMENT AND RUN THIS CELL IF YOU NEED TO INSTALL/ UPGRADE TSAI
# stable = False # True: latest version from github, False: stable version in pip
# if stable:
# # !pip install tsai -U >> /dev/null
# else:
# # !pip install git+https://github.com/timeseriesAI/tsai.git -U >> /dev/null
# ## NOTE: REMEMBER TO RESTART YOUR RUNTIME ONCE THE INSTALLATION IS FINISHED
# + [markdown] hidden=true id="TQYbJYGXPvVE"
# Restart your runtime before running the cells below.
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="-c-1XUgyP2L4" outputId="6a5c955d-f617-4ab3-8d34-35f57e3716f4"
from tsai.all import *
print('tsai :', tsai.__version__)
print('fastai :', fastai.__version__)
print('fastcore :', fastcore.__version__)
print('torch :', torch.__version__)
# + [markdown] heading_collapsed=true id="sKD5F_WLOExX"
# # Using MiniRocket 🚀
# + [markdown] hidden=true id="i_4xekpuNP-K"
# * First, create the features for each timeseries sample using the MiniRocketFeatures module (MRF).
# MRF takes a minibatch of time series samples and outputs their features. Choosing an appropriate minibatch size
# allows training sets of any size to be used without exhausting CPU or GPU memory.
#
# Typically, 10000 features will characterize each sample. These features are relatively
# expensive to create, but once created they are fixed and may be used as the
# input for further training. They might be saved for example in memory or on disk.
#
#
# * Next, the features are sent to a linear model. The original
# MiniRocket research used sklearn's RidgeClassifier. When the number of samples
# goes beyond the capacity of RidgeClassifier, a deep learning "Head" can be
# used instead to learn the classification/regression from minibatches of features.
#
# For the following demos, we use the tsai package to handle timeseries efficiently and clearly. tsai is fully integrated with fastai, allowing fastai's training loop and other convenience to be used. To learn more about tsai, please check out the docs and tutorials at https://github.com/timeseriesAI/tsai
#
# Let's get started.
# + [markdown] hidden=true id="Y1QUkSiMTfnx"
# ## sklearn-type API (<10k samples) 🚶🏻♂️
# + [markdown] heading_collapsed=true hidden=true id="hT3Hjp_aWEBL"
# ### Classifier
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="Soi5dGpyTnRO" outputId="bd9c2e3a-e637-4d6d-a0e8-72cd7681b570"
# Univariate classification with sklearn-type API
dsid = 'OliveOil'
X_train, y_train, X_valid, y_valid = get_UCR_data(dsid) # Download the UCR dataset
# Computes MiniRocket features using the original (non-PyTorch) MiniRocket code.
# It then sends them to a sklearn's RidgeClassifier (linear classifier).
model = MiniRocketClassifier()
# time just the fit so the printed line also reports training cost
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
print(f'valid accuracy : {model.score(X_valid, y_valid):.3%} time: {t}')
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="VL5Aax4MUI5F" outputId="4aa367cf-f65d-4183-fcb5-f01366b7a287"
# Multivariate classification with sklearn-type API (LSST is a multichannel dataset)
dsid = 'LSST'
X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)
model = MiniRocketClassifier()
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
print(f'valid accuracy : {model.score(X_valid, y_valid):.3%} time: {t}')
# + [markdown] hidden=true id="0-ZGYs5Vt_1u"
# One way to try to improve performance is to use an ensemble (that uses majority vote). Bear in mind that the ensemble will take longer since multiple models will be fitted.
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="jEU38iCRtlba" outputId="eccb41b3-a7b9-4898-cff4-cdbfa3a0d34b"
# Multivariate classification ensemble with sklearn-type API
dsid = 'LSST'
X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)
# five MiniRocket models combined by majority vote at predict time
model = MiniRocketVotingClassifier(n_estimators=5)
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
print(f'valid accuracy : {model.score(X_valid, y_valid):.3%} time: {t}')
# + [markdown] hidden=true id="QhdI9Ltdku-x"
# In this case, we see an increase in accuracy although this may not be the case with other datasets.
# + [markdown] hidden=true id="PN9vNWT5xcSO"
# Once a model is trained, you can always save it for future inference:
# + hidden=true id="vKez0qjLxhzq"
dsid = 'LSST'
X_train, y_train, X_valid, y_valid = get_UCR_data(dsid)
model = MiniRocketClassifier()
model.fit(X_train, y_train)
# persist the fitted classifier to disk for later inference
model.save(f'MiniRocket_{dsid}')
del model
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="ECAaFHfDxt3v" outputId="0203196e-d706-4401-db51-e1aa370490e4"
# reload the saved model and confirm it still scores the validation set
model = load_minirocket(f'MiniRocket_{dsid}')
print(f'valid accuracy : {model.score(X_valid, y_valid):.3%}')
# + [markdown] hidden=true id="XVhSkHhAWHjT"
# ### Regressor
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="tvmgrKIdVLyp" outputId="c4917ee4-384a-48b2-cf59-b111e5327ba4"
# Univariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'Covid3Month'
X_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid)
# greater_is_better=False because a lower RMSE is better
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
model = MiniRocketRegressor(scoring=rmse_scorer)
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
y_pred = model.predict(X_valid)
# squared=False turns MSE into RMSE
rmse = mean_squared_error(y_valid, y_pred, squared=False)
print(f'valid rmse : {rmse:.5f} time: {t}')
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="Xgh0AH52vg_x" outputId="d791c2c5-1e05-45e4-95ee-2b99cb5f9fcb"
# Univariate regression ensemble with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'Covid3Month'
X_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid)
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
model = MiniRocketVotingRegressor(n_estimators=5, scoring=rmse_scorer)
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
y_pred = model.predict(X_valid)
rmse = mean_squared_error(y_valid, y_pred, squared=False)
print(f'valid rmse : {rmse:.5f} time: {t}')
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="75vhN3E1TzmB" outputId="f651eed0-1c0f-438e-d4d8-6051bd3d19cb"
# Multivariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'AppliancesEnergy'
X_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid)
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
model = MiniRocketRegressor(scoring=rmse_scorer)
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
y_pred = model.predict(X_valid)
rmse = mean_squared_error(y_valid, y_pred, squared=False)
print(f'valid rmse : {rmse:.5f} time: {t}')
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="XXEQiTaCumdb" outputId="706524c9-6395-48ac-8d09-499bd9ebf36e"
# Multivariate regression ensemble with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'AppliancesEnergy'
X_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid)
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
model = MiniRocketVotingRegressor(n_estimators=5, scoring=rmse_scorer)
timer.start(False)
model.fit(X_train, y_train)
t = timer.stop()
y_pred = model.predict(X_valid)
rmse = mean_squared_error(y_valid, y_pred, squared=False)
print(f'valid rmse : {rmse:.5f} time: {t}')
# + [markdown] hidden=true id="YyP_GnMqymq6"
# We'll also save this model for future inference:
# + hidden=true id="F99gD-f0yr-f"
# Multivariate regression ensemble with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'AppliancesEnergy'
X_train, y_train, X_valid, y_valid = get_Monash_regression_data(dsid)
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
model = MiniRocketVotingRegressor(n_estimators=5, scoring=rmse_scorer)
model.fit(X_train, y_train)
# persist the fitted ensemble to disk for later inference
model.save(f'MRVRegressor_{dsid}')
del model
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="bQwFqBx5y7Dl" outputId="d668c966-859f-4a6f-c2d6-827cc3c1c7a9"
# reload the saved ensemble and confirm it reproduces the validation RMSE
model = load_minirocket(f'MRVRegressor_{dsid}')
y_pred = model.predict(X_valid)
rmse = mean_squared_error(y_valid, y_pred, squared=False)
print(f'valid rmse : {rmse:.5f}')
# + [markdown] heading_collapsed=true hidden=true id="HgEp5d7ZWeQp"
# ## Pytorch implementation (any # samples) 🏃
# + [markdown] heading_collapsed=true hidden=true id="lGvUHAz4WqyK"
# ### Offline feature calculation
# + [markdown] hidden=true id="dYGS9Boj0jFK"
# In the offline calculation, all features are calculated in a first stage and then passed to the dataloader that creates batches. These features will remain the same throughout training.
#
# ⚠️ In order to avoid leakage when using the offline feature calculation, it's important to fit MiniRocketFeatures using just the train samples. You should pass X_train as a torch tensor.
# + hidden=true id="xGt5RVc_9xiE"
# Create the MiniRocket features and store them in memory.
dsid = 'LSST'
X, y, splits = get_UCR_data(dsid, split_data=False)
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="VIMBShJVWnu2" outputId="cae5c0f5-8531-4fb7-c8bf-996b4dfa8e87"
mrf = MiniRocketFeatures(X.shape[1], X.shape[2]).to(default_device())
# fit the feature extractor on the TRAIN split only, to avoid leakage
X_train = torch.from_numpy(X[splits[0]]).to(default_device())
mrf.fit(X_train)
# transform the whole dataset in chunks and bring the features back as numpy
X_feat = get_minirocket_features(X, mrf, chunksize=1024, to_np=True)
X_feat.shape, type(X_feat)
# + [markdown] hidden=true id="8pEVKgvCFnur"
# We'll save this model, as we'll need it to create features in the future.
# + hidden=true id="WcYH5LqOFbhL"
PATH = Path("./models/MRF.pt")
PATH.parent.mkdir(parents=True, exist_ok=True)
torch.save(mrf.state_dict(), PATH)
# + [markdown] hidden=true id="XmDfJsFsxCGE"
# As you can see the shape of the minirocket features is [sample_size x n_features x 1]. The last dimension (1) is added because `tsai` expects input data to have 3 dimensions, although in this case there's no longer a temporal dimension.
#
# Once the features are calculated, we'll need to train a Pytorch model. We'll use a simple linear model:
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="EcIuOC03y1kY" outputId="d588f851-8666-4acb-98a3-8e221fe807da"
# Using tsai/fastai, create DataLoaders for the features in X_feat.
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
dls = get_ts_dls(X_feat, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
# model is a linear classifier Head
model = build_ts_model(MiniRocketHead, dls=dls)
model.head
# + colab={"base_uri": "https://localhost:8080/", "height": 301} hidden=true id="sbwTsD4VdMoD" outputId="b4458fd4-0915-42a9-faa4-2a63b92fb3d0"
# Using tsai/fastai, create DataLoaders for the features in X_feat.
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
dls = get_ts_dls(X_feat, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
# model is a linear classifier Head
model = build_ts_model(MiniRocketHead, dls=dls)
# Drop into fastai and use it to find a good learning rate.
learn = Learner(dls, model, metrics=accuracy, cbs=ShowGraph())
learn.lr_find()
# + colab={"base_uri": "https://localhost:8080/", "height": 925} hidden=true id="trFudsPMF8oi" outputId="57313116-7a25-459a-e5f0-8868b955f2bf"
# As above, use tsai to bring X_feat into fastai, and train.
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
dls = get_ts_dls(X_feat, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
model = build_ts_model(MiniRocketHead, dls=dls)
learn = Learner(dls, model, metrics=accuracy, cbs=ShowGraph())
# time the 10-epoch one-cycle training run
timer.start()
learn.fit_one_cycle(10, 3e-4)
timer.stop()
# + [markdown] hidden=true id="2ZrPlovSF-iS"
# We'll now save the learner for inference:
# + hidden=true id="UW3rynV2IvQ-"
PATH = Path('./models/MRL.pkl')
PATH.parent.mkdir(parents=True, exist_ok=True)
# export serializes the learner (model + dataloader config) for inference
learn.export(PATH)
# + [markdown] heading_collapsed=true hidden=true id="zvRTB0giGRVY"
# #### Inference:
# + [markdown] hidden=true id="5ELGao7iGTqE"
# For inference we'll need to follow the same process as before:
#
# 1. Create the features
# 2. Create predictions for those features
# + [markdown] hidden=true id="z1hURSNWsGU-"
# Let's recreate mrf (MiniRocketFeatures) to be able to create new features:
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="sqN9Y4yCGpRU" outputId="67750d9d-9c4c-40cb-cb19-1508155a3799"
# rebuild the feature extractor and restore its fitted state from disk
mrf = MiniRocketFeatures(X.shape[1], X.shape[2]).to(default_device())
PATH = Path("./models/MRF.pt")
mrf.load_state_dict(torch.load(PATH))
# + [markdown] hidden=true id="DfCjWlPTrTMe"
# We'll create new features. In this case we'll use the valid set to confirm the predictions accuracy matches the one at the end of training, but you can use any data:
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="tVsJZDpaGeUe" outputId="09e7fd8a-6c89-4362-b531-ee86b4c8e560"
new_feat = get_minirocket_features(X[splits[1]], mrf, chunksize=1024, to_np=True)
new_feat.shape, type(new_feat)
# + [markdown] hidden=true id="iSHoac5qrpEp"
# We'll now load the saved learner:
# + hidden=true id="Oahyk36SG-lD"
PATH = Path('./models/MRL.pkl')
learn = load_learner(PATH, cpu=False)
# + [markdown] hidden=true id="t5NzEzjtsdxE"
# and pass the newly created features
# + colab={"base_uri": "https://localhost:8080/", "height": 36} hidden=true id="8q0ihEO7HFCF" outputId="7f7c239e-beee-4d69-905c-b451b838b158"
probas, _, preds = learn.get_X_preds(new_feat)
preds
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="AAo70Rh5Hied" outputId="f26bbfae-13cc-4e10-c56c-5cd9f538f6a4"
# accuracy of the reloaded pipeline on the validation split
skm.accuracy_score(y[splits[1]], preds)
# + [markdown] hidden=true id="zNhdDnxGsTAc"
# Ok, so the predictions match the ones at the end of training, as this accuracy is the same one we got at the end.
# + [markdown] heading_collapsed=true hidden=true id="29obhugoWvIG"
# ### Online feature calculation
# + [markdown] hidden=true id="XPsRLlUd7C1z"
# MiniRocket can also be used online, re-calculating the features each minibatch. In this scenario, you do not calculate fixed features one time. The online mode is a bit slower than the offline scenario, but offers more flexibility. Here are some potential uses:
#
# * You can experiment with different scaling techniques (no standardization, standardize by sample, normalize, etc).
# * You can apply data augmentation to the original time series.
# * Another use of online calculation is to experiment with training the kernels and biases.
# To do this requires modifications to the MRF code.
# + colab={"base_uri": "https://localhost:8080/", "height": 301} hidden=true id="wFYPqzqNW020" outputId="b1b693f9-91df-4f86-fa28-7421894ad026"
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
# raw time series go into the dataloader; features are computed inside the model
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
model = build_ts_model(MiniRocket, dls=dls)
learn = Learner(dls, model, metrics=accuracy, cbs=ShowGraph())
learn.lr_find()
# + [markdown] hidden=true id="W2KUvs-K1m4s"
# Notice 2 important differences with the offline scenario:
#
# * in this case we pass X to the dataloader instead of X_feat. The features will be calculated within the model.
# * we use MiniRocket instead of MiniRocketHead. MiniRocket is a Pytorch version that calculates features on the fly before passing them to a linear head.
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="hwZ0mNlm2Liz" outputId="68dc851a-4170-4ac0-d39c-215e3546f61c"
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
model = build_ts_model(MiniRocket, dls=dls)
model
# + colab={"base_uri": "https://localhost:8080/", "height": 925} hidden=true id="NnVPBsHo1MEJ" outputId="00b6ff72-482b-4fff-9ac0-8d576401e0e5"
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
model = build_ts_model(MiniRocket, dls=dls)
learn = Learner(dls, model, metrics=accuracy, cbs=ShowGraph())
# time the 10-epoch one-cycle training run
timer.start()
learn.fit_one_cycle(10, 3e-4)
timer.stop()
# + [markdown] hidden=true id="olFvfEUC2V1g"
# Since we calculate the minirocket features within the model, we now have the option to use data augmentation for example:
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} hidden=true id="4Mgf90JREbvx" outputId="ed448602-f246-405d-e05e-a61b6ebc4f9a"
# MiniRocket with data augmentation
tfms = [None, TSClassification()]
# augmentations run on the fly, before the in-model feature calculation
batch_tfms = [TSStandardize(by_sample=True), TSMagScale(), TSWindowWarp()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
model = build_ts_model(MiniRocket, dls=dls)
learn = Learner(dls, model, metrics=accuracy, cbs=[ShowGraph()])
learn.fit_one_cycle(20, 3e-4)
# + [markdown] hidden=true id="LxoLvPY12YeA"
# In this case, we can see that using MiniRocket (Pytorch implementation) with data augmentation achieves an accuracy of 69%+, compared to the sklearn-API implementation which is around 65%.
# + [markdown] hidden=true id="0GKfqdzW6k0B"
# Once you have trained the model, you can always save it for future use. We just need to export the learner:
# + hidden=true id="KCjvvcE4_ixr"
PATH = Path('./models/MiniRocket_aug.pkl')
PATH.parent.mkdir(parents=True, exist_ok=True)
learn.export(PATH)
# + hidden=true id="q3Jr7_cV_37Q"
del learn
# + [markdown] heading_collapsed=true hidden=true id="uTA4vqzcDoXO"
# #### Inference
# + [markdown] hidden=true id="g4xNxgvJt-Mu"
# Let's first recreate the learner:
# + hidden=true id="W889ynbit-ca"
PATH = Path('./models/MiniRocket_aug.pkl')
learn = load_learner(PATH, cpu=False)
# + [markdown] hidden=true id="qzbV4CaCEESa"
# We are now ready to generate predictions. We'll confirm it works well with the valid dataset:
# + colab={"base_uri": "https://localhost:8080/", "height": 36} hidden=true id="lb-bki7kAO5q" outputId="1fb2facf-5c3a-4afd-fa10-cb5d285b0975"
probas, _, preds = learn.get_X_preds(X[splits[1]])
preds
# + [markdown] hidden=true id="JOgpiGFh7FbZ"
# We can see that the validation loss & metrics are the same we had when we saved it.
# + colab={"base_uri": "https://localhost:8080/"} hidden=true id="bX2TebHyDSF_" outputId="6b59a456-ff68-4281-d144-5e5ae52c3992"
# accuracy of the reloaded learner on the validation split
skm.accuracy_score(y[splits[1]], preds)
# + [markdown] heading_collapsed=true id="wnm2UjfW9Eyf"
# # Conclusion ✅
# + [markdown] hidden=true id="vMJqJj-49HXN"
# MiniRocket is a new type of algorithm that is significantly faster than any other method of comparable accuracy (including Rocket), and significantly more accurate than any other method of even roughly-similar computational expense.
#
# `tsai` supports the 2 variations of MiniRocket introduced in this notebook. A cpu version (that can be used with relatively small datasets, with <10k samples) and a gpu (Pytorch) version that can be used with datasets of any size. The Pytorch version can be used in an offline mode (pre-calculating all features before fitting the model) or in an online mode (calculating features on the fly).
#
# We believe MiniRocket is a great new tool, and encourage you to try it in your next Time Series Classification or Regression task.
| tutorial_nbs/10_Time_Series_Classification_and_Regression_with_MiniRocket.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import pandas as pd
import numpy as np
# +
# Load per-region bounding boxes exported from the NUTS shapefile.
coord = pd.read_csv('./shape013_polygons.csv',
                    header = None,
                    index_col = 0)
coord.columns = ['latitude_min', 'latitude_max',
                 'longitude_min', 'longitude_max',
                 'code', 'source', 'STAT_LEVL']
# bounding-box centers used as each region's representative point
coord["lat_avg"] = (coord.latitude_max+coord.latitude_min)/2
coord["lgt_avg"] = (coord.longitude_max+coord.longitude_min)/2
# keep only the NUTS code component (third dot-separated field)
coord['code'] = coord.code.apply(lambda x : x.split('.')[2])
coord = coord[['code', 'lat_avg', 'lgt_avg']]
# shape: {'lat_avg': {code: value}, 'lgt_avg': {code: value}}
coordinates_dict = coord.set_index('code').to_dict()
# -
# Load the NUTS-3 TopoJSON geometries.
with open('./NUTS_RG_60M_2016_4326.json', 'rb') as f:
    nuts3 = json.load(f)
# Build one record per NUTS region that has known center coordinates.
# NOTE: the original filtered with `... is not 'null'`, an identity comparison
# against a string literal — implementation-dependent (relies on string
# interning) and a SyntaxWarning on modern Python. Using the default None from
# dict.get and an explicit `is not None` test is equivalent and well-defined.
countries = [
    {'name': j['properties']['NUTS_NAME'],
     'cca2': j['properties']['NUTS_ID'],
     'lat': np.round(coordinates_dict['lat_avg'].get(j['properties']['NUTS_ID']), 2),
     'lng': np.round(coordinates_dict['lgt_avg'].get(j['properties']['NUTS_ID']), 2),
     'cca3': j['properties']['NUTS_ID'],
     'cioc': j['properties']['NUTS_ID'],
     } for j in nuts3['objects']['NUTS_RG_60M_2016_4326']['geometries']
    if coordinates_dict['lat_avg'].get(j['properties']['NUTS_ID']) is not None
]
# Write the result for consumption by the app.
with open('countries.json', 'w') as f:
    json.dump(countries, f)
| superset/data/create_countries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Hv-tVJksaXvl"
# # Off-Policy Learning in Two-stage Recommender Systems
# + [markdown] id="HojVVZWkoN8I"
# Many real-world recommender systems need to be highly scalable: matching millions of items with billions of users, with milliseconds latency. The scalability requirement has led to widely used two-stage recommender systems, consisting of efficient candidate generation model(s) in the first stage and a more powerful ranking model in the second stage.
#
# Logged user feedback, e.g., user clicks or dwell time, are often used to build both candidate generation and ranking models for recommender systems. While it’s easy to collect large amount of such data, they are inherently biased because the feedback can only be observed on items recommended by the previous systems. Recently, off-policy correction on such biases have attracted increasing interest in the field of recommender system research. However, most existing work either assumed that the recommender system is a single-stage system or only studied how to apply off-policy correction to the candidate generation stage of the system without explicitly considering the interactions between the two stages.
#
# In this work, we propose a two-stage off-policy policy gradient method, and showcase that ignoring the interaction between the two stages leads to a sub-optimal policy in two-stage recommender systems. The proposed method explicitly takes into account the ranking model when training the candidate generation model, which helps improve the performance of the whole system. We conduct experiments on real-world datasets with large item space and demonstrate the effectiveness of our proposed method.
#
# ## Pseudo code
#
# 
#
# ## Model structure
#
# 
#
# 
#
# ## Training model
#
# 1. The simulation model - divides MovieLens-1M into training set, validation set and test set at 3:1:1.
# 2. Behavior strategy model and ranking model - Use 10,000 user-item pairs randomly generated by the simulation model to train the behavior strategy model, and then obtain a bandit data set by sampling the top-5 items of each user predicted by the behavior strategy model. Train the ranking model based on this data set, divide 2000 users as the verification set, and 4000 users as the test set.
# 3. Candidate generation model - Set the optimizer to AdaGrad, the initial learning rate is 0.05, and the weight limit parameters of 1-IPS and 2-IPS are set c1 = 10, c2 = 0.01 c_1 = 10, c_2=0.01.
# For each training method, we trained 20 candidate generation models initialized with different random seeds. The early stopping method is applied in both one-stage evaluation and two-stage evaluation.
# + [markdown] id="MicXUzrTYrJk"
# ## Imports
# + id="LlZYJdfqS9ON"
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
# + [markdown] id="QVgH_-gKXPnn"
# ## Model
# + id="lBqeMgi0XQ10"
# Cardinalities of the categorical features in MovieLens-1M.
# NOTE(review): these module-level defaults are overwritten later in the
# notebook with the values observed in the loaded dataset (dataset.NUM_*).
NUM_ITEMS = 3883   # distinct movies
NUM_YEARS = 81     # distinct release years
NUM_GENRES = 18    # genre indicator columns
NUM_USERS = 6040   # distinct users
NUM_OCCUPS = 21    # distinct occupation codes
NUM_AGES = 7       # distinct age buckets
NUM_ZIPS = 3439    # distinct zip codes
# + id="oWN7pQP0XXe_"
class ItemRep(nn.Module):
    """Embeds an item's categorical and real-valued features into one vector.

    Output layout: [item embedding | year embedding | projected genres].
    """

    def __init__(self, item_emb_size=10, year_emb_size=5, genre_hidden=5):
        super(ItemRep, self).__init__()
        # Index 0 is reserved as a padding item.
        self.item_embedding = nn.Embedding(
            NUM_ITEMS + 1, item_emb_size, padding_idx=0)
        self.year_embedding = nn.Embedding(NUM_YEARS, year_emb_size)
        # Dense projection of the (normalized) multi-hot genre vector.
        self.genre_linear = nn.Linear(NUM_GENRES, genre_hidden)
        self.rep_dim = item_emb_size + year_emb_size + genre_hidden

    def forward(self, categorical_feats, real_feats):
        """categorical_feats columns: [item id, year id]; real_feats: genres."""
        item_ids, year_ids = categorical_feats[:, 0], categorical_feats[:, 1]
        pieces = (
            self.item_embedding(item_ids),
            self.year_embedding(year_ids),
            self.genre_linear(real_feats),
        )
        return torch.cat(pieces, dim=1)
# + id="4vrFd9FwXWGP"
class UserRep(nn.Module):
    """Embeds a user's categorical features into a single dense vector.

    Output layout: [user | gender | age | occupation | zip] embeddings.
    """

    def __init__(self, user_emb_size=10, feature_emb_size=5):
        super(UserRep, self).__init__()
        # Index 0 is reserved as a padding user.
        self.user_embedding = nn.Embedding(
            NUM_USERS + 1, user_emb_size, padding_idx=0)
        self.gender_embedding = nn.Embedding(2, feature_emb_size)
        self.age_embedding = nn.Embedding(NUM_AGES, feature_emb_size)
        self.occup_embedding = nn.Embedding(NUM_OCCUPS, feature_emb_size)
        self.zip_embedding = nn.Embedding(NUM_ZIPS, feature_emb_size)
        self.rep_dim = user_emb_size + feature_emb_size * 4

    def forward(self, categorical_feats, real_feats=None):
        """categorical_feats columns: [user, gender, age, occupation, zip]."""
        embedders = (
            self.user_embedding,
            self.gender_embedding,
            self.age_embedding,
            self.occup_embedding,
            self.zip_embedding,
        )
        pieces = [
            embedder(categorical_feats[:, col])
            for col, embedder in enumerate(embedders)
        ]
        return torch.cat(pieces, dim=1)
# + id="ni5trB4aXUxc"
class ImpressionSimulator(nn.Module):
    """Simulator that scores the outcome (logit) of a user-item impression.

    Concatenates the user and item representations (plus an optional scalar
    impression feature, e.g. the normalized timestamp) and feeds them
    through a small MLP that emits one logit per impression.
    """

    def __init__(self, hidden=100, use_impression_feats=False):
        super(ImpressionSimulator, self).__init__()
        self.user_rep = UserRep()
        self.item_rep = ItemRep()
        self.use_impression_feats = use_impression_feats
        input_dim = self.user_rep.rep_dim + self.item_rep.rep_dim
        if use_impression_feats:
            input_dim += 1  # one real-valued impression feature
        self.linear = nn.Sequential(
            nn.Linear(input_dim, hidden),
            nn.ReLU(), nn.Linear(hidden, 50), nn.ReLU(), nn.Linear(50, 1))

    def forward(self, user_feats, item_feats, impression_feats=None):
        """Returns a 1-D tensor of logits, one per row in the batch.

        Args:
            user_feats / item_feats: kwargs dicts for UserRep / ItemRep.
            impression_feats: dict with "real_feats" of shape (batch, 1);
                required iff use_impression_feats is True.
        """
        users = self.user_rep(**user_feats)
        items = self.item_rep(**item_feats)
        inputs = torch.cat([users, items], dim=1)
        if self.use_impression_feats:
            inputs = torch.cat([inputs, impression_feats["real_feats"]], dim=1)
        # BUGFIX: squeeze only the trailing feature dimension. The previous
        # bare .squeeze() collapsed a batch of size 1 to a 0-d scalar, which
        # breaks losses/metrics expecting a (batch,) tensor; for batch > 1
        # the result is unchanged.
        return self.linear(inputs).squeeze(-1)
# + id="eA_V_mUfXTIS"
class Nominator(nn.Module):
    """Two-tower candidate-generation (nominator) model.

    In "binary" mode each user is scored against its own single candidate;
    otherwise every user is scored against the whole item batch.
    """

    def __init__(self):
        super(Nominator, self).__init__()
        self.item_rep = ItemRep()
        self.user_rep = UserRep()
        # Projects the user tower into item-representation space so the
        # score is a plain dot product.
        self.linear = nn.Linear(self.user_rep.rep_dim, self.item_rep.rep_dim)
        self.binary = True

    def forward(self, user_feats, item_feats):
        """Returns dot-product logits between users and items."""
        user_vecs = self.linear(F.relu(self.user_rep(**user_feats)))
        user_vecs = user_vecs.unsqueeze(2)  # (b, h) -> (b, h, 1)
        item_vecs = self.item_rep(**item_feats)
        if self.binary:
            # One candidate per user: (b, h) -> (b, 1, h).
            item_vecs = item_vecs.unsqueeze(1)
        else:
            # Every user scored against the shared candidate set:
            # (c, h) -> (b, c, h).
            item_vecs = item_vecs.unsqueeze(0).expand(user_vecs.size(0), -1, -1)
        return torch.bmm(item_vecs, user_vecs).squeeze()

    def set_binary(self, binary=True):
        self.binary = binary
# + id="edS3BzfQXRzW"
class Ranker(nn.Module):
    """Ranker model scoring items given a user and an impression context.

    Like Nominator, but the user tower is concatenated with the scalar
    impression feature before projection into item space.
    """

    def __init__(self):
        super(Ranker, self).__init__()
        self.item_rep = ItemRep()
        self.user_rep = UserRep()
        # +1 input for the scalar impression feature (timestamp).
        self.linear = nn.Linear(self.user_rep.rep_dim + 1,
                                self.item_rep.rep_dim)
        self.binary = True

    def forward(self, user_feats, item_feats, impression_feats):
        """Returns dot-product logits between context-aware users and items."""
        user_vecs = self.user_rep(**user_feats)
        context = torch.cat([user_vecs, impression_feats["real_feats"]], dim=1)
        context = self.linear(context).unsqueeze(2)  # (b, h) -> (b, h, 1)
        item_vecs = self.item_rep(**item_feats)
        if self.binary:
            # One candidate per user: (b, h) -> (b, 1, h).
            item_vecs = item_vecs.unsqueeze(1)
        else:
            # Every user scored against the shared candidate set:
            # (c, h) -> (b, c, h), c = #items.
            item_vecs = item_vecs.unsqueeze(0).expand(user_vecs.size(0), -1, -1)
        return torch.bmm(item_vecs, context).squeeze()

    def set_binary(self, binary=True):
        self.binary = binary
# + [markdown] id="_wp1K2tmZavh"
# ## Download data
# + colab={"base_uri": "https://localhost:8080/"} id="TG0jGrFFZcjS" executionInfo={"status": "ok", "timestamp": 1633453276159, "user_tz": -330, "elapsed": 789, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="82905f21-cb45-4ee8-eff9-74b6fc1d8875"
# !wget -q --show-progress https://files.grouplens.org/datasets/movielens/ml-1m.zip
# !unzip ml-1m.zip
# + [markdown] id="l0VwA1nHS-Jn"
# ## Dataset
# + [markdown] id="QQJ1yQJqS-GS"
# Create a torch dataset class for ML-1M; convert the label into binary labels (positive if rating > 3).
# + id="VbiK3zhUXiPZ"
class MovieLensDataset(Dataset):
    """ML-1M dataset of logged (user, item, timestamp) -> binary-label rows.

    Labels are binarized ratings (positive iff rating > 3) and timestamps
    are min-max normalized to [0, 1]. All feature tensors are materialized
    on ``device`` up front; __getitem__ only indexes into them.
    """

    def __init__(self, filepath, device="cuda:0"):
        self.device = device
        ratings, users, items = self.load_data(filepath)
        self.user_feats = {}
        self.item_feats = {}
        self.impression_feats = {}
        # Per-user categorical features: [UserID, Gender, Age, Occupation, Zip].
        self.user_feats["categorical_feats"] = torch.LongTensor(
            users.values).to(device)
        # Per-item features: [MovieID, Year] categorical + genre proportions.
        self.item_feats["categorical_feats"] = torch.LongTensor(
            items.values[:, :2]).to(device)
        self.item_feats["real_feats"] = torch.FloatTensor(
            items.values[:, 2:]).to(device)
        self.impression_feats["user_ids"] = torch.LongTensor(
            ratings.values[:, 0]).to(device)
        self.impression_feats["item_ids"] = torch.LongTensor(
            ratings.values[:, 1]).to(device)
        # Column 3 is the normalized timestamp, kept as an (n, 1) matrix.
        self.impression_feats["real_feats"] = torch.FloatTensor(
            ratings.values[:, 3]).view(-1, 1).to(device)
        self.impression_feats["labels"] = torch.FloatTensor(
            ratings.values[:, 2]).to(device)

    def __len__(self):
        # One sample per logged rating row.
        return len(self.impression_feats["user_ids"])

    def __getitem__(self, idx):
        """Returns (feats, label) where feats nests user/item/impression dicts."""
        labels = self.impression_feats["labels"][idx]
        feats = {}
        feats["impression_feats"] = {}
        feats["impression_feats"]["real_feats"] = self.impression_feats[
            "real_feats"][idx]
        user_id = self.impression_feats["user_ids"][idx]
        item_id = self.impression_feats["item_ids"][idx]
        # IDs are 1-based, the feature tables are 0-based, hence the -1.
        feats["user_feats"] = {
            key: value[user_id - 1]
            for key, value in self.user_feats.items()
        }
        feats["item_feats"] = {
            key: value[item_id - 1]
            for key, value in self.item_feats.items()
        }
        return feats, labels

    def load_data(self, filepath):
        """Reads ratings.dat/users.dat/movies.dat and preprocesses them.

        Returns (ratings, users, movies) DataFrames with categorical columns
        factorized to dense integer codes and MovieIDs remapped to a
        contiguous 1..N range. Also records the NUM_* feature cardinalities
        on self for the embedding layers.
        """
        names = "UserID::MovieID::Rating::Timestamp".split("::")
        ratings = pd.read_csv(
            os.path.join(filepath, "ratings.dat"),
            sep="::",
            names=names,
            engine="python")
        # Binary label: positive iff the user rated the movie above 3.
        ratings["Rating"] = (ratings["Rating"] > 3).astype(int)
        # Min-max normalize timestamps to [0, 1].
        ratings["Timestamp"] = (
            ratings["Timestamp"] - ratings["Timestamp"].min()
        ) / float(ratings["Timestamp"].max() - ratings["Timestamp"].min())
        names = "UserID::Gender::Age::Occupation::Zip-code".split("::")
        users = pd.read_csv(
            os.path.join(filepath, "users.dat"),
            sep="::",
            names=names,
            engine="python")
        # Factorize every user column except UserID into integer codes.
        for i in range(1, users.shape[1]):
            users.iloc[:, i] = pd.factorize(users.iloc[:, i])[0]
        names = "MovieID::Title::Genres".split("::")
        Genres = [
            "Action", "Adventure", "Animation", "Children's", "Comedy",
            "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
            "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War",
            "Western"
        ]
        movies = pd.read_csv(
            os.path.join(filepath, "movies.dat"),
            sep="::",
            names=names,
            engine="python")
        # Titles end with "(YYYY)"; slice out the 4-digit release year.
        movies["Year"] = movies["Title"].apply(lambda x: x[-5:-1])
        # One indicator column per genre (substring match on "A|B|C" lists).
        for genre in Genres:
            movies[genre] = movies["Genres"].apply(lambda x: genre in x)
        # Column 3 is Year (after MovieID, Title, Genres): factorize it.
        movies.iloc[:, 3] = pd.factorize(movies.iloc[:, 3])[0]
        movies.iloc[:, 4:] = movies.iloc[:, 4:].astype(float)
        movies = movies.loc[:, ["MovieID", "Year"] + Genres]
        # Normalize genre indicators to proportions that sum to 1 per movie.
        movies.iloc[:, 2:] = movies.iloc[:, 2:].div(
            movies.iloc[:, 2:].sum(axis=1), axis=0)
        # Remap sparse MovieIDs to a dense, 1-based range.
        movie_id_map = {}
        for i in range(movies.shape[0]):
            movie_id_map[movies.loc[i, "MovieID"]] = i + 1
        movies["MovieID"] = movies["MovieID"].apply(lambda x: movie_id_map[x])
        ratings["MovieID"] = ratings["MovieID"].apply(
            lambda x: movie_id_map[x])
        # Record feature cardinalities for the embedding layers.
        self.NUM_ITEMS = len(movies.MovieID.unique())
        self.NUM_YEARS = len(movies.Year.unique())
        self.NUM_GENRES = movies.shape[1] - 2
        self.NUM_USERS = len(users.UserID.unique())
        self.NUM_OCCUPS = len(users.Occupation.unique())
        self.NUM_AGES = len(users.Age.unique())
        self.NUM_ZIPS = len(users["Zip-code"].unique())
        return ratings, users, movies
# + id="Oatjq-lbXgCw"
class SyntheticMovieLensDataset(Dataset):
    """Fully-observed synthetic ML-1M: one impression per (user, item) pair.

    Labels come from a pretrained ImpressionSimulator: its sigmoid output is
    thresholded at ``cut`` to get a binary label for every one of the
    NUM_USERS x NUM_ITEMS pairs. Generated impressions are cached at
    ``synthetic_data_path``. Unlike MovieLensDataset, feature tensors stay
    on CPU; ``to_device`` moves batches on demand.
    """

    def __init__(self, filepath, simulator_path, synthetic_data_path, cut=0.764506,
                 device="cuda:0"):
        self.device = device
        self.cut = cut  # probability threshold for a positive synthetic label
        self.simulator = None
        ratings, users, items = self.load_data(filepath)
        self.user_feats = {}
        self.item_feats = {}
        self.impression_feats = {}
        self.user_feats["categorical_feats"] = torch.LongTensor(users.values)
        self.item_feats["categorical_feats"] = torch.LongTensor(
            items.values[:, :2])
        self.item_feats["real_feats"] = torch.FloatTensor(items.values[:, 2:])
        if os.path.exists(synthetic_data_path):
            # Reuse the cached impressions; only re-threshold the labels so
            # a different ``cut`` can be applied without regeneration.
            self.impression_feats = torch.load(synthetic_data_path)
            self.impression_feats["labels"] = (
                self.impression_feats["label_probs"] >= cut).to(
                    dtype=torch.float32)
            print("loaded full_impression_feats.pt")
        else:
            print("generating impression_feats")
            # Load the simulator to synthesize labels for all pairs.
            self.simulator = ImpressionSimulator(use_impression_feats=True)
            self.simulator.load_state_dict(torch.load(simulator_path))
            self.simulator = self.simulator.to(device)
            impressions = self.get_full_impressions(ratings)
            self.impression_feats["user_ids"] = torch.LongTensor(
                impressions[:, 0])
            self.impression_feats["item_ids"] = torch.LongTensor(
                impressions[:, 1])
            self.impression_feats["real_feats"] = torch.FloatTensor(
                impressions[:, 2]).view(-1, 1)
            # Placeholder labels so __getitem__ works inside generate_labels.
            self.impression_feats["labels"] = torch.zeros_like(
                self.impression_feats["real_feats"])
            self.impression_feats["label_probs"] = self.generate_labels()
            self.impression_feats["labels"] = (
                self.impression_feats["label_probs"] >= cut).to(
                    dtype=torch.float32)
            torch.save(self.impression_feats, synthetic_data_path)
            print("saved impression_feats")

    def __len__(self):
        # NUM_USERS * NUM_ITEMS synthetic impressions.
        return len(self.impression_feats["user_ids"])

    def __getitem__(self, idx):
        """Returns (feats, label); idx may be an int or a list of ints."""
        labels = self.impression_feats["labels"][idx]
        feats = {}
        feats["impression_feats"] = {}
        feats["impression_feats"]["real_feats"] = self.impression_feats[
            "real_feats"][idx]
        user_id = self.impression_feats["user_ids"][idx]
        item_id = self.impression_feats["item_ids"][idx]
        # IDs are 1-based, the feature tables are 0-based, hence the -1.
        feats["user_feats"] = {
            key: value[user_id - 1]
            for key, value in self.user_feats.items()
        }
        feats["item_feats"] = {
            key: value[item_id - 1]
            for key, value in self.item_feats.items()
        }
        return feats, labels

    def load_data(self, filepath):
        """Same preprocessing as MovieLensDataset.load_data (kept duplicated).

        Returns (ratings, users, movies) with factorized categoricals,
        dense 1-based MovieIDs, and records the NUM_* cardinalities on self.
        """
        names = "UserID::MovieID::Rating::Timestamp".split("::")
        ratings = pd.read_csv(
            os.path.join(filepath, "ratings.dat"),
            sep="::",
            names=names,
            engine="python")
        # Binary label: positive iff the user rated the movie above 3.
        ratings["Rating"] = (ratings["Rating"] > 3).astype(int)
        # Min-max normalize timestamps to [0, 1].
        ratings["Timestamp"] = (
            ratings["Timestamp"] - ratings["Timestamp"].min()
        ) / float(ratings["Timestamp"].max() - ratings["Timestamp"].min())
        names = "UserID::Gender::Age::Occupation::Zip-code".split("::")
        users = pd.read_csv(
            os.path.join(filepath, "users.dat"),
            sep="::",
            names=names,
            engine="python")
        # Factorize every user column except UserID into integer codes.
        for i in range(1, users.shape[1]):
            users.iloc[:, i] = pd.factorize(users.iloc[:, i])[0]
        names = "MovieID::Title::Genres".split("::")
        Genres = [
            "Action", "Adventure", "Animation", "Children's", "Comedy",
            "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
            "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War",
            "Western"
        ]
        movies = pd.read_csv(
            os.path.join(filepath, "movies.dat"),
            sep="::",
            names=names,
            engine="python")
        # Titles end with "(YYYY)"; slice out the 4-digit release year.
        movies["Year"] = movies["Title"].apply(lambda x: x[-5:-1])
        # One indicator column per genre (substring match on "A|B|C" lists).
        for genre in Genres:
            movies[genre] = movies["Genres"].apply(lambda x: genre in x)
        movies.iloc[:, 3] = pd.factorize(movies.iloc[:, 3])[0]
        movies.iloc[:, 4:] = movies.iloc[:, 4:].astype(float)
        movies = movies.loc[:, ["MovieID", "Year"] + Genres]
        # Normalize genre indicators to proportions that sum to 1 per movie.
        movies.iloc[:, 2:] = movies.iloc[:, 2:].div(
            movies.iloc[:, 2:].sum(axis=1), axis=0)
        # Remap sparse MovieIDs to a dense, 1-based range.
        movie_id_map = {}
        for i in range(movies.shape[0]):
            movie_id_map[movies.loc[i, "MovieID"]] = i + 1
        movies["MovieID"] = movies["MovieID"].apply(lambda x: movie_id_map[x])
        ratings["MovieID"] = ratings["MovieID"].apply(
            lambda x: movie_id_map[x])
        # Record feature cardinalities for the embedding layers.
        self.NUM_ITEMS = len(movies.MovieID.unique())
        self.NUM_YEARS = len(movies.Year.unique())
        self.NUM_GENRES = movies.shape[1] - 2
        self.NUM_USERS = len(users.UserID.unique())
        self.NUM_OCCUPS = len(users.Occupation.unique())
        self.NUM_AGES = len(users.Age.unique())
        self.NUM_ZIPS = len(users["Zip-code"].unique())
        return ratings, users, movies

    def get_full_impressions(self, ratings):
        """Gets NUM_USERS x NUM_ITEMS impression features by iterating the user and item ids.
        The impression-level feature, i.e. the timestamp, is sampled from a normal distribution with
        mean and std as the empirical mean and std of each user's recorded timestamps in the real data.
        """
        # Collect each user's observed (normalized) timestamps.
        timestamps = {}
        for i in range(len(ratings)):
            u_id = ratings.loc[i, "UserID"]
            timestamps[u_id] = timestamps.get(u_id, [])
            timestamps[u_id].append(ratings.loc[i, "Timestamp"])
        # Fixed seed so the synthetic timestamps are reproducible.
        rs = np.random.RandomState(0)
        t_samples = []
        for i in range(self.NUM_USERS):
            u_id = i + 1
            t_samples.append(
                rs.normal(
                    loc=np.mean(timestamps[u_id]),
                    scale=np.std(timestamps[u_id]),
                    size=(self.NUM_ITEMS, )))
        t_samples = np.array(t_samples)
        # Rows are [user_id, item_id, timestamp], ordered user-major.
        impressions = []
        for i in range(self.NUM_USERS):
            for j in range(self.NUM_ITEMS):
                impressions.append([i + 1, j + 1, t_samples[i, j]])
        impressions = np.array(impressions)
        return impressions

    def to_device(self, data):
        """Recursively moves tensors (possibly nested in dicts/lists) to self.device."""
        if isinstance(data, torch.Tensor):
            return data.to(self.device)
        if isinstance(data, dict):
            transformed_data = {}
            for key in data:
                transformed_data[key] = self.to_device(data[key])
        elif type(data) == list:
            transformed_data = []
            for x in data:
                transformed_data.append(self.to_device(x))
        else:
            raise NotImplementedError(
                "Type {} not supported.".format(type(data)))
        return transformed_data

    def generate_labels(self):
        """Generates the binary labels using the simulator on every user-item pair."""
        with torch.no_grad():
            self.simulator.eval()
            preds = []
            # Score in chunks of 500 impressions to bound device memory.
            for i in tqdm(range(len(self.impression_feats["labels"]) // 500)):
                feats, _ = self.__getitem__(
                    list(range(i * 500, (i + 1) * 500)))
                feats = self.to_device(feats)
                outputs = torch.sigmoid(self.simulator(**feats))
                preds += list(outputs.squeeze().cpu().numpy())
            # Tail chunk. NOTE(review): reuses the last value of ``i`` from
            # the loop above — assumes at least 500 impressions exist.
            if (i + 1) * 500 < len(self.impression_feats["labels"]):
                feats, _ = self.__getitem__(
                    list(
                        range((i + 1) * 500,
                              len(self.impression_feats["labels"]))))
                feats = self.to_device(feats)
                outputs = torch.sigmoid(self.simulator(**feats))
                preds += list(outputs.squeeze().cpu().numpy())
        return torch.FloatTensor(np.array(preds))
# + [markdown] id="DHxVBSLOS-EH"
# ## Metrics
# + id="mG2JSOybYwSb"
class BaseMetric(object):
    """Common plumbing for ranking metrics evaluated at one or more cutoffs.

    Subclasses implement __call__ (public entry) and _compute (one cutoff).
    """

    def __init__(self, rel_threshold, k):
        self.rel_threshold = rel_threshold
        # Normalize a scalar cutoff to a one-element array so subclasses can
        # always iterate over self.k.
        self.k = np.array([k]) if np.isscalar(k) else k

    def __len__(self):
        # Number of values this metric reports (one per cutoff by default).
        return len(self.k)

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def _compute(self, *args, **kwargs):
        raise NotImplementedError
# + id="ORCmDzDbYyve"
class PrecisionRecall(BaseMetric):
    """Precision@k and Recall@k over a ranked prediction list."""

    def __init__(self, rel_threshold=0, k=10):
        super(PrecisionRecall, self).__init__(rel_threshold, k)

    def __len__(self):
        # Reports one precision and one recall value per cutoff.
        return 2 * len(self.k)

    def __str__(self):
        str_precision = [('Precision@%1.f' % x) for x in self.k]
        str_recall = [('Recall@%1.f' % x) for x in self.k]
        return (','.join(str_precision)) + ',' + (','.join(str_recall))

    def __call__(self, targets, predictions):
        """Returns [precision@k1, ..., recall@k1, ...] as one flat array."""
        precision, recall = zip(
            *[self._compute(targets, predictions, x) for x in self.k])
        result = np.concatenate((precision, recall), axis=0)
        return result

    def _compute(self, targets, predictions, k):
        """Returns (precision@k, recall@k) for a single cutoff."""
        predictions = predictions[:k]
        # BUGFIX: an empty target or prediction list previously raised
        # ZeroDivisionError; both metrics are zero by definition then.
        if len(predictions) == 0 or len(targets) == 0:
            return 0.0, 0.0
        num_hit = len(set(predictions).intersection(set(targets)))
        return float(num_hit) / len(predictions), float(num_hit) / len(targets)
# + id="vb-CoYr4Y041"
class MeanAP(BaseMetric):
    """Average precision at cutoff(s) k (binary relevance)."""

    def __init__(self, rel_threshold=0, k=np.inf):
        super(MeanAP, self).__init__(rel_threshold, k)

    def __call__(self, targets, predictions):
        return np.array(
            [self._compute(targets, predictions, cutoff) for cutoff in self.k])

    def __str__(self):
        return ','.join('MeanAP@%1.f' % cutoff for cutoff in self.k)

    def _compute(self, targets, predictions, k):
        """AP@k of ``predictions`` against the relevant set ``targets``."""
        if not list(targets):
            return 0.0
        # Only slice when needed: k may be np.inf.
        ranked = predictions[:k] if len(predictions) > k else predictions
        hits, ap_sum = 0.0, 0.0
        for rank, item in enumerate(ranked):
            # Count each relevant item once, at its first occurrence.
            if item in targets and item not in ranked[:rank]:
                hits += 1.0
                ap_sum += hits / (rank + 1.0)
        return ap_sum / min(len(targets), k)
# + id="3ik4wXqKY2bD"
class NormalizedDCG(BaseMetric):
    """Normalized discounted cumulative gain with binary relevance."""

    def __init__(self, rel_threshold=0, k=10):
        super(NormalizedDCG, self).__init__(rel_threshold, k)

    def __call__(self, targets, predictions):
        return np.array(
            [self._compute(targets, predictions, cutoff) for cutoff in self.k])

    def __str__(self):
        return ','.join('NDCG@%1.f' % cutoff for cutoff in self.k)

    def _compute(self, targets, predictions, k):
        """NDCG@k of ``predictions`` against the relevant set ``targets``."""
        # Cap the cutoff at the number of relevant items so the ideal DCG
        # is attainable.
        k = min(len(targets), k)
        if len(predictions) > k:
            predictions = predictions[:k]
        # Ideal DCG: every one of the top-k positions is relevant.
        idcg = np.sum(1 / np.log2(np.arange(2, k + 2)))
        dcg = sum(1 / np.log2(rank + 2)
                  for rank, item in enumerate(predictions) if item in targets)
        return dcg / idcg
# Metric suite shared by the one-stage and two-stage ranking evaluations.
all_metrics = [PrecisionRecall(k=[1, 5, 10]), NormalizedDCG(k=[5, 10, 20])]
# + id="8X-dbHHdY4Ke"
class Evaluator(object):
    """Evaluator for both one-stage and two-stage evaluations.

    Args:
        u: user indices of the positively-rewarded logged bandit samples.
        a: matching item indices (used to exclude already-shown items).
        simulator: ground-truth ImpressionSimulator (on the device).
        syn: SyntheticMovieLensDataset supplying features and the label cut.
    """

    def __init__(self, u, a, simulator, syn):
        self.u = u
        self.a = a
        self.simulator = simulator
        self.syn = syn
        self.target_rankings = self.get_target_rankings()
        self.metrics = all_metrics

    def get_target_rankings(self):
        """Per user, the item indices the simulator labels positive.

        The per-item timestamps are replaced by the user's mean timestamp so
        the target set does not depend on the sampled synthetic timestamps.
        """
        target_rankings = []
        with torch.no_grad():
            self.simulator.eval()
            for i in range(NUM_USERS):
                impression_ids = range(i * NUM_ITEMS, (i + 1) * NUM_ITEMS)
                feats, _ = self.syn[impression_ids]
                feats["impression_feats"]["real_feats"] = torch.mean(
                    feats["impression_feats"]["real_feats"],
                    dim=0,
                    keepdim=True).repeat([NUM_ITEMS, 1])
                feats = self.syn.to_device(feats)
                outputs = torch.sigmoid(self.simulator(**feats))
                # Items whose simulated probability clears the label cutoff.
                user_target_ranking = (outputs >
                                       self.syn.cut).nonzero().view(-1)
                target_rankings.append(user_target_ranking.cpu().numpy())
        return target_rankings

    def one_stage_ranking_eval(self, logits, user_list):
        """Ranking metrics for the nominator alone, averaged over user_list.

        NOTE(review): mutates the caller's ``logits`` in place (rated items
        are set to -inf).
        """
        # Mask out items the user has already been shown in the bandit logs.
        for i, user in enumerate(user_list):
            user_rated_items = self.a[self.u == user]
            logits[i, user_rated_items] = -np.inf
        sort_idx = torch.argsort(logits, dim=1, descending=True).cpu().numpy()
        # Init evaluation results.
        total_metrics_len = 0
        for metric in self.metrics:
            total_metrics_len += len(metric)
        total_val_metrics = np.zeros(
            [len(user_list), total_metrics_len], dtype=np.float32)
        valid_rows = []
        for i, user in enumerate(user_list):
            pred_ranking = sort_idx[i].tolist()
            target_ranking = self.target_rankings[user]
            # Skip users with no positive items under the simulator.
            if len(target_ranking) <= 0:
                continue
            metric_results = list()
            for j, metric in enumerate(self.metrics):
                result = metric(
                    targets=target_ranking, predictions=pred_ranking)
                metric_results.append(result)
            total_val_metrics[i, :] = np.concatenate(metric_results)
            valid_rows.append(i)
        # Average evaluation results by user.
        total_val_metrics = total_val_metrics[valid_rows]
        avg_val_metrics = (total_val_metrics.mean(axis=0)).tolist()
        # Summary evaluation results into a dict keyed by metric name.
        ind, result = 0, OrderedDict()
        for metric in self.metrics:
            values = avg_val_metrics[ind:ind + len(metric)]
            if len(values) <= 1:
                result[str(metric)] = values
            else:
                for name, value in zip(str(metric).split(','), values):
                    result[name] = value
            ind += len(metric)
        return result

    def two_stage_ranking_eval(self, logits, ranker, user_list, k=30):
        """Ranking metrics for nominator top-k re-ranked by ``ranker``."""
        sort_idx = torch.argsort(logits, dim=1, descending=True).cpu().numpy()
        # Nominate the top-k unseen items per user.
        topk_item_ids = []
        for i, user in enumerate(user_list):
            topk_item_ids.append([])
            for j in sort_idx[i]:
                if j not in self.a[self.u == user]:
                    topk_item_ids[-1].append(j)
                if len(topk_item_ids[-1]) == k:
                    break
        # Per-user mean timestamp as the ranker's impression feature.
        time_feats = self.syn.to_device(
            torch.mean(
                self.syn.impression_feats["real_feats"].view(
                    NUM_USERS, NUM_ITEMS),
                dim=1).view(-1, 1))
        # Init evaluation results.
        total_metrics_len = 0
        for metric in self.metrics:
            total_metrics_len += len(metric)
        total_val_metrics = np.zeros(
            [len(user_list), total_metrics_len], dtype=np.float32)
        valid_rows = []
        for i, user in enumerate(user_list):
            user_feats = {
                key: value[user].view(1, -1)
                for key, value in self.syn.user_feats.items()
            }
            item_feats = {
                key: value[topk_item_ids[i]]
                for key, value in self.syn.item_feats.items()
            }
            user_feats = self.syn.to_device(user_feats)
            item_feats = self.syn.to_device(item_feats)
            impression_feats = {"real_feats": time_feats[user].view(1, -1)}
            # Re-rank the nominated candidates with the ranker.
            ranker_logits = ranker(user_feats, item_feats,
                                   impression_feats).view(1, -1)
            _, pred = ranker_logits.topk(k=k)
            pred = pred[0].cpu().numpy()
            # NOTE(review): ``pred`` indexes the candidate list, but it is
            # applied to sort_idx[i] (rank positions). The two only agree
            # when no already-rated items were skipped above — confirm this
            # is intended.
            pred_ranking = sort_idx[i][pred].tolist()
            target_ranking = self.target_rankings[user]
            if len(target_ranking) <= 0:
                continue
            metric_results = list()
            for j, metric in enumerate(self.metrics):
                result = metric(
                    targets=target_ranking, predictions=pred_ranking)
                metric_results.append(result)
            total_val_metrics[i, :] = np.concatenate(metric_results)
            valid_rows.append(i)
        # Average evaluation results by user.
        total_val_metrics = total_val_metrics[valid_rows]
        avg_val_metrics = (total_val_metrics.mean(axis=0)).tolist()
        # Summary evaluation results into a dict keyed by metric name.
        ind, result = 0, OrderedDict()
        for metric in self.metrics:
            values = avg_val_metrics[ind:ind + len(metric)]
            if len(values) <= 1:
                result[str(metric)] = values
            else:
                for name, value in zip(str(metric).split(','), values):
                    result[name] = value
            ind += len(metric)
        return result

    def one_stage_eval(self, logits):
        """Mean simulated success rate of the nominator's top unseen item."""
        sort_idx = torch.argsort(logits, dim=1, descending=True).cpu().numpy()
        impression_ids = []
        for i in range(NUM_USERS):
            # First item in ranked order the user has not been shown.
            for j in sort_idx[i]:
                if j not in self.a[self.u == i]:
                    break
            impression_ids.append(i * NUM_ITEMS + j)
        feats, labels = self.syn[impression_ids]
        # Use each user's mean timestamp as the impression feature.
        feats["impression_feats"]["real_feats"] = torch.mean(
            self.syn.impression_feats["real_feats"].view(NUM_USERS, NUM_ITEMS),
            dim=1).view(-1, 1)
        with torch.no_grad():
            self.simulator.eval()
            feats = self.syn.to_device(feats)
            outputs = torch.sigmoid(self.simulator(**feats))
        # Fraction of users whose recommended item the simulator labels
        # positive.
        return torch.mean(
            (outputs > self.syn.cut).to(dtype=torch.float32)).item()

    def two_stage_eval(self, logits, ranker, k=30):
        """Mean simulated success rate of nominator top-k + ranker's pick."""
        sort_idx = torch.argsort(logits, dim=1, descending=True).cpu().numpy()
        # Nominate the top-k unseen items per user.
        topk_item_ids = []
        for i in range(NUM_USERS):
            topk_item_ids.append([])
            for j in sort_idx[i]:
                if j not in self.a[self.u == i]:
                    topk_item_ids[-1].append(j)
                if len(topk_item_ids[-1]) == k:
                    break
        # Per-user mean timestamp as the ranker's impression feature.
        time_feats = self.syn.to_device(
            torch.mean(
                self.syn.impression_feats["real_feats"].view(
                    NUM_USERS, NUM_ITEMS),
                dim=1).view(-1, 1))
        recommneded = []  # (sic) ranker's single pick per user
        for i in range(NUM_USERS):
            user_feats = {
                key: value[i].view(1, -1)
                for key, value in self.syn.user_feats.items()
            }
            item_feats = {
                key: value[topk_item_ids[i]]
                for key, value in self.syn.item_feats.items()
            }
            user_feats = self.syn.to_device(user_feats)
            item_feats = self.syn.to_device(item_feats)
            impression_feats = {"real_feats": time_feats[i].view(1, -1)}
            ranker_logits = ranker(user_feats, item_feats,
                                   impression_feats).view(1, -1)
            # The ranker's argmax over the k candidates is the shown item.
            _, pred = torch.max(ranker_logits, 1)
            pred = pred.squeeze().item()
            recommneded.append(topk_item_ids[i][pred])
        impression_ids = []
        for i in range(NUM_USERS):
            impression_ids.append(i * NUM_ITEMS + recommneded[i])
        feats, labels = self.syn[impression_ids]
        # Use each user's mean timestamp as the impression feature.
        feats["impression_feats"]["real_feats"] = torch.mean(
            self.syn.impression_feats["real_feats"].view(NUM_USERS, NUM_ITEMS),
            dim=1).view(-1, 1)
        with torch.no_grad():
            self.simulator.eval()
            feats = self.syn.to_device(feats)
            outputs = torch.sigmoid(self.simulator(**feats))
        # Fraction of users whose recommended item the simulator labels
        # positive.
        return torch.mean(
            (outputs > self.syn.cut).to(dtype=torch.float32)).item()
# + [markdown] id="5eFQHVu4Yjl3"
# ## Losses
# + id="jxr1uWzxZDxK"
def batch_select(mat, idx):
    """Picks one entry per row: returns mat[i, idx[i]] as a 1-D tensor.

    Args:
        mat: 2-D tensor of shape (batch, n).
        idx: 1-D integer tensor of shape (batch,) holding column indices.
    """
    columns = torch.arange(
        mat.size(1), device=mat.device, dtype=torch.long).expand_as(mat)
    # Boolean row-wise one-hot mask; indexing with it flattens in row order.
    return mat[columns == idx.view(-1, 1)]
def unique_and_padding(mat, padding_idx, dim=-1):
    """Conducts unique operation along dim and pads to the same length.

    Per row: each duplicated value (all but the last of an equal run after
    sorting) is replaced by ``padding_idx`` and the row is re-sorted; then
    torch.unique along ``dim`` drops slices duplicated across the whole
    batch to shrink the padded width.
    NOTE(review): assumes ``padding_idx`` is >= every real value so the
    final sort pushes padding to the end — true for the item-count padding
    index used by loss_2s; confirm for any other call site.
    """
    samples, _ = torch.sort(mat, dim=dim)
    samples_roll = torch.roll(samples, -1, dims=dim)
    # Zero where an element equals its right neighbour, i.e. a duplicate.
    samples_diff = samples - samples_roll
    samples_diff[:, -1] = 1  # deal with the edge case that there is only one unique sample in a row
    samples_mask = torch.bitwise_not(samples_diff == 0)  # unique mask
    # Keep unique entries, overwrite duplicates with the padding index.
    samples *= samples_mask.to(dtype=samples.dtype)
    samples += (1 - samples_mask.to(dtype=samples.dtype)) * padding_idx
    samples, _ = torch.sort(samples, dim=dim)
    # shrink size to max unique length
    samples = torch.unique(samples, dim=dim)
    return samples
# + id="e_YSbiXxZIA3"
def loss_ce(logits, a, unused_p=None, unused_ranker_logits=None):
    """Plain cross-entropy on the logged actions (no off-policy correction).

    The unused_* parameters keep the signature interchangeable with the
    IPS-corrected losses.
    """
    log_probs = F.log_softmax(logits, dim=1)
    picked = log_probs.gather(1, a.view(-1, 1).long()).view(-1)
    return -picked.mean()
# + id="smu-8Qr5ZGsJ"
def loss_ips(logits,
             a,
             p,
             unused_ranker_logits=None,
             upper_limit=100,
             lower_limit=0.01):
    """IPS loss (one-stage).

    Reweights each logged action's log-likelihood by the ratio between the
    current policy's probability and the logging-policy probability ``p``,
    clipped to [lower_limit, upper_limit] and self-normalized over the batch.
    """
    idx = a.view(-1, 1).long()
    # Current-policy probability of the logged action; detached so no
    # gradient flows through the importance weight itself.
    action_probs = F.softmax(logits.detach(), dim=1).gather(1, idx).view(-1)
    importance_weight = torch.clamp(
        action_probs / p, min=lower_limit, max=upper_limit)
    # Self-normalized IPS.
    importance_weight = importance_weight / torch.mean(importance_weight)
    log_action_probs = F.log_softmax(logits, dim=1).gather(1, idx).view(-1)
    return -torch.mean(log_action_probs * importance_weight)
# + id="cabVpWwPZFJa"
def loss_2s(logits,
            a,
            p,
            ranker_logits,
            slate_sample_size=100,
            slate_size=30,
            temperature=np.e,
            alpha=1e-5,
            upper_limit=100,
            lower_limit=0.01):
    """Two stage loss.

    IPS-weighted policy gradient for the nominator that accounts for the
    (fixed) ranker acting on sampled candidate slates.

    Args:
        logits: (batch, num_items) nominator logits.
        a: (batch,) logged actions (item indices).
        p: (batch,) logging-policy probabilities of the logged actions.
        ranker_logits: (batch, num_items) ranker scores, treated as constant.
        slate_sample_size: number of slates sampled per example.
        slate_size: number of items drawn per sampled slate.
        temperature: softmax temperature of the slate-sampling proposal.
        alpha: weight of the sampled-slate correction term.
        upper_limit/lower_limit: clipping bounds for importance weights.
    """
    num_logits = logits.size(1)
    rls = ranker_logits.detach()
    # Tempered proposal distribution for slate sampling (no gradient).
    probs = F.softmax(logits.detach() / temperature, dim=1)
    log_probs = F.log_softmax(logits, dim=1)
    # Append a padding column (index num_logits): -inf ranker score and
    # zero log-prob, so padding produced by unique_and_padding is inert.
    rls = torch.cat(
        [
            rls,
            torch.Tensor([float("-inf")]).to(rls.device).view(1, 1).expand(
                rls.size(0), 1)
        ],
        dim=1)
    log_probs = torch.cat(
        [log_probs,
         torch.zeros(log_probs.size(0), 1).to(log_probs.device)],
        dim=1)
    # Clipped, self-normalized importance weights (same scheme as loss_ips).
    importance_weight = batch_select(F.softmax(logits.detach(), dim=1), a) / p
    importance_weight = torch.where(
        importance_weight > lower_limit, importance_weight,
        lower_limit * torch.ones_like(importance_weight))
    importance_weight = torch.where(
        importance_weight < upper_limit, importance_weight,
        upper_limit * torch.ones_like(importance_weight))
    importance_weight /= torch.mean(importance_weight)
    log_action_res = []
    sampled_slate_res = []
    for i in range(probs.size(0)):
        # Sample slates from the proposal; always force-include the logged
        # action, then dedupe each slate (padding with index num_logits).
        samples = torch.multinomial(
            probs[i], slate_sample_size * slate_size, replacement=True).view(
                slate_sample_size, slate_size)
        samples = torch.cat(
            [samples, a[i].view(1, 1).expand(samples.size(0), 1)], dim=1)
        samples = unique_and_padding(samples, num_logits)
        # rp: ranker's softmax over each sampled slate.
        rp = F.softmax(
            F.embedding(samples, rls[i].view(-1, 1)).squeeze(-1), dim=1)
        # lp: log-prob of the slate under the current policy, excluding the
        # logged action's own log-prob (it appears in the main term).
        lp = torch.sum(
            F.embedding(samples, log_probs[i].view(-1, 1)).squeeze(-1),
            dim=1) - log_probs[i, a[i]]
        sampled_slate_res.append(
            torch.mean(importance_weight[i] * rp[samples == a[i]] * lp))
        log_action_res.append(
            torch.mean(importance_weight[i] * rp[samples == a[i]]))
    # Main term: REINFORCE on the logged action, weighted by the expected
    # probability that the ranker picks it out of a sampled slate.
    loss = -torch.mean(
        torch.stack(log_action_res) * batch_select(log_probs, a))
    # Correction term for the slate-sampling distribution.
    loss += -torch.mean(torch.stack(sampled_slate_res)) * alpha
    return loss
# + [markdown] id="oXehpz2RS9_m"
# ## Main
# + id="zMBV6BttW-fg"
# !pip install -q torchnet
# + id="QLae9ZGJS99b"
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from six.moves import cPickle as pickle
from torch.utils.data import DataLoader, Subset
from torchnet.meter import AUCMeter
# + id="ALobTMq4T11c"
# Hyper-parameters (argparse with args={} so it also runs inside a notebook).
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=int, default=1, help="Verbose.")
parser.add_argument("--seed", type=int, default=0, help="Random seed.")
parser.add_argument("--loss_type", default="loss_ce")
parser.add_argument("--device", default="cuda:0")
parser.add_argument("--alpha", type=float, default=1e-3, help="Loss ratio.")
parser.add_argument("--lr", type=float, default=0.05, help="Learning rate.")
args = parser.parse_args(args={})
# NOTE(review): seeds are hard-coded to 0 here rather than using args.seed.
torch.manual_seed(0)
torch.cuda.manual_seed(0)
filepath = "./ml-1m"
device = args.device
dataset = MovieLensDataset(filepath, device=device)
# Overwrite the module-level cardinality constants with the values actually
# observed in the loaded data.
NUM_ITEMS = dataset.NUM_ITEMS
NUM_YEARS = dataset.NUM_YEARS
NUM_GENRES = dataset.NUM_GENRES
NUM_USERS = dataset.NUM_USERS
NUM_OCCUPS = dataset.NUM_OCCUPS
NUM_AGES = dataset.NUM_AGES
NUM_ZIPS = dataset.NUM_ZIPS
# + colab={"base_uri": "https://localhost:8080/"} id="nDDAOXjTTxzR" executionInfo={"status": "ok", "timestamp": 1633455089789, "user_tz": -330, "elapsed": 1787668, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="0350d0a6-9c9a-4542-87d6-6eda111736e5"
# Load a cached simulator checkpoint, or train one on the real ML-1M data.
simulator_path = os.path.join(filepath, "simulator.pt")
if os.path.exists(simulator_path):
    simulator = ImpressionSimulator(use_impression_feats=True)
    simulator.load_state_dict(torch.load(simulator_path))
    simulator.to(device)
    simulator.eval()
else:
    # train a simulator model on the original ML-1M dataset
    # the simulator will be used to generate synthetic labels later
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    num_samples = len(dataset)
    # 3:1:1 train/val/test split by row index.
    train_loader = DataLoader(
        Subset(dataset, list(range(num_samples * 3 // 5))),
        batch_size=128,
        shuffle=True)
    val_loader = DataLoader(
        Subset(dataset,
               list(range(num_samples * 3 // 5, num_samples * 4 // 5))),
        batch_size=128)
    test_loader = DataLoader(
        Subset(dataset, list(range(num_samples * 4 // 5, num_samples))),
        batch_size=128)
    simulator = ImpressionSimulator(use_impression_feats=True).to(device)
    opt = torch.optim.Adagrad(
        simulator.parameters(), lr=0.05, weight_decay=1e-4)
    criterion = nn.BCEWithLogitsLoss()
    simulator.train()
    for epoch in range(4):
        print("---epoch {}---".format(epoch))
        for step, batch in enumerate(train_loader):
            feats, labels = batch
            logits = simulator(**feats)
            loss = criterion(logits, labels)
            opt.zero_grad()
            loss.backward()
            opt.step()
            # Periodic validation; the break exits the inner step loop once
            # validation AUC is good enough.
            if (step + 1) % 500 == 0:
                with torch.no_grad():
                    simulator.eval()
                    auc = AUCMeter()
                    for feats, labels in val_loader:
                        outputs = torch.sigmoid(simulator(**feats))
                        auc.add(outputs, labels)
                    print(step, auc.value()[0])
                    if auc.value()[0] > 0.735:
                        break
                simulator.train()
    # Checkpoint on CPU; the model is moved back to the device later on.
    simulator.to("cpu")
    torch.save(simulator.state_dict(), simulator_path)
# + colab={"base_uri": "https://localhost:8080/"} id="nvYcuBtBTwAT" executionInfo={"status": "ok", "timestamp": 1633455306838, "user_tz": -330, "elapsed": 217093, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="7786e902-1ae8-4375-870c-b3b43a34ba16"
# create a torch dataset class that adopts the simulator and generates the
# synthetic dataset (cached at synthetic_data_path after the first run)
synthetic_data_path = os.path.join(filepath, "full_impression_feats.pt")
syn = SyntheticMovieLensDataset(
    filepath, simulator_path, synthetic_data_path, device=device)
# Load a cached logging policy, or train one on the synthetic dataset.
logging_policy_path = os.path.join(filepath, "logging_policy.pt")
if os.path.exists(logging_policy_path):
    logging_policy = Nominator()
    logging_policy.load_state_dict(torch.load(logging_policy_path))
    logging_policy.to(device)
    logging_policy.eval()
else:
    # train a logging policy using the synthetic dataset
    num_samples = len(syn)
    idx_list = list(range(num_samples))
    rs = np.random.RandomState(0)  # fixed seed for a reproducible split
    rs.shuffle(idx_list)
    # Deliberately small training set (10k impressions) so the logging
    # policy stays imperfect.
    train_idx = idx_list[:10000]
    val_idx = idx_list[10000:20000]
    test_idx = idx_list[-100000:]
    train_loader = DataLoader(
        Subset(syn, train_idx), batch_size=128, shuffle=True)
    val_loader = DataLoader(Subset(syn, val_idx), batch_size=128)
    test_loader = DataLoader(Subset(syn, test_idx), batch_size=128)
    logging_policy = Nominator().to(device)
    opt = torch.optim.Adagrad(
        logging_policy.parameters(), lr=0.05, weight_decay=1e-4)
    criterion = nn.BCEWithLogitsLoss()
    logging_policy.train()
    for epoch in range(40):
        print("---epoch {}---".format(epoch))
        for step, batch in enumerate(train_loader):
            feats, labels = batch
            feats = syn.to_device(feats)
            labels = syn.to_device(labels)
            logits = logging_policy(feats["user_feats"], feats["item_feats"])
            loss = criterion(logits, labels)
            opt.zero_grad()
            loss.backward()
            opt.step()
        # Per-epoch validation AUC.
        with torch.no_grad():
            logging_policy.eval()
            auc = AUCMeter()
            for feats, labels in val_loader:
                feats = syn.to_device(feats)
                labels = syn.to_device(labels)
                outputs = torch.sigmoid(
                    logging_policy(feats["user_feats"], feats["item_feats"]))
                auc.add(outputs, labels)
            print(step, auc.value()[0])
        logging_policy.train()
    logging_policy.eval()
    # Checkpoint on CPU, then move back to the device for use below.
    logging_policy.to("cpu")
    torch.save(logging_policy.state_dict(), logging_policy_path)
    logging_policy.to(device)
# + id="dRwfP8RXTuLQ"
def generate_bandit_samples(logging_policy, syn, k=5):
    """Generates partial-labeled bandit samples with the logging policy.

    For each user, draws k items from the policy's softmax distribution and
    records the propensity and the synthetic label of each drawn item.

    Arguments:
        logging_policy: trained Nominator producing per-item logits per user.
        syn: synthetic dataset holding user/item/impression features.
        k: The number of items to be sampled for each user.

    Returns:
        Four flat np.ndarrays of equal length (num_users * k):
        user ids, sampled item ids, sampling probabilities, rewards/labels.
    """
    logging_policy.set_binary(False)
    with torch.no_grad():
        feats = {}
        feats["user_feats"] = syn.user_feats
        feats["item_feats"] = syn.item_feats
        feats = syn.to_device(feats)
        # probs: (num_users, num_items) action distribution of the policy.
        probs = F.softmax(logging_policy(**feats), dim=1)
        sampled_users = []
        sampled_actions = []
        sampled_probs = []
        sampled_rewards = []
        for i in range(probs.size(0)):
            sampled_users.append([i] * k)
            # Sample k items without replacement for user i.
            sampled_actions.append(
                torch.multinomial(probs[i], k).cpu().numpy().tolist())
            sampled_probs.append(
                probs[i, sampled_actions[-1]].cpu().numpy().tolist())
            # Impression labels are stored flat as user_id * num_items + item_id.
            sampled_rewards.append(syn.impression_feats["labels"][[
                i * probs.size(1) + j for j in sampled_actions[-1]
            ]].numpy().tolist())
    return np.array(sampled_users).reshape(-1), np.array(
        sampled_actions).reshape(-1), np.array(sampled_probs).reshape(
            -1), np.array(sampled_rewards).reshape(-1)
# Draw the logged bandit feedback and set up the evaluator on positive samples.
torch.manual_seed(0)
torch.cuda.manual_seed(0)
u, a, p, r = generate_bandit_samples(
    logging_policy, syn,
    k=5)  # u: user, a: item, p: logging policy probability, r: reward/label
simulator = simulator.to(device)
ev = Evaluator(u[r > 0], a[r > 0], simulator, syn)
all_user_feats = syn.to_device(syn.user_feats)
all_item_feats = syn.to_device(syn.item_feats)
# Per-user mean of the impression real-valued feature, shaped (NUM_USERS, 1).
all_impression_feats = syn.to_device({
    "real_feats":
    torch.mean(
        syn.impression_feats["real_feats"].view(NUM_USERS, NUM_ITEMS),
        dim=1).view(-1, 1)
})
# + colab={"base_uri": "https://localhost:8080/"} id="B-uzlJDoTsGo" executionInfo={"status": "ok", "timestamp": 1633456341552, "user_tz": -330, "elapsed": 28192, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="be5db6fb-58d9-4b29-ec10-7f277d3f46e1"
# Split validation/test users.
# First 2000 users validate; the remainder are held out for testing.
num_val_users = 2000
val_user_list = list(range(0, num_val_users))
test_user_list = list(range(num_val_users, NUM_USERS))
test_item_feats = all_item_feats
test_user_feats = syn.to_device(
    {key: value[test_user_list]
     for key, value in all_user_feats.items()})
test_impression_feats = syn.to_device({
    key: value[test_user_list]
    for key, value in all_impression_feats.items()
})
# NOTE: val_item_feats/test_item_feats alias all_item_feats — items are shared.
val_item_feats = all_item_feats
val_user_feats = syn.to_device(
    {key: value[val_user_list]
     for key, value in all_user_feats.items()})
val_impression_feats = syn.to_device(
    {key: value[val_user_list]
     for key, value in all_impression_feats.items()})
# Load a cached ranker if present, otherwise train one on the bandit samples.
ranker_path = os.path.join(filepath, "ranker.pt")
if os.path.exists(ranker_path):
    ranker = Ranker()
    ranker.load_state_dict(torch.load(ranker_path))
    ranker.to(device)
    ranker.eval()
    ranker.set_binary(False)
else:
    # train the ranker with binary cross-entropy
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    batch_size = 128
    neg_sample_size = 29  # NOTE(review): unused in this cell — confirm intent
    ranker = Ranker().to(device)
    opt = torch.optim.Adagrad(ranker.parameters(), lr=0.05, weight_decay=1e-4)
    criterion = nn.BCEWithLogitsLoss()
    rs = np.random.RandomState(0)
    ranker.train()
    for epoch in range(10):
        print("---epoch {}---".format(epoch))
        for step in range(len(u) // batch_size):
            # Slice the logged (user, item, reward) triples for this batch.
            user_list = u[step * batch_size:(step + 1) * batch_size]
            item_list = a[step * batch_size:(step + 1) * batch_size]
            user_feats = syn.to_device({
                key: value[user_list]
                for key, value in syn.user_feats.items()
            })
            item_feats = syn.to_device({
                key: value[item_list]
                for key, value in syn.item_feats.items()
            })
            # Flat impression index = user_id * NUM_ITEMS + item_id.
            impression_list = [
                user_id * NUM_ITEMS + item_id
                for user_id, item_id in zip(user_list, item_list)
            ]
            impression_feats = syn.to_device({
                "real_feats":
                syn.impression_feats["real_feats"][impression_list]
            })
            labels = torch.FloatTensor(
                r[step * batch_size:(step + 1) * batch_size]).to(device)
            logits = ranker(user_feats, item_feats, impression_feats)
            loss = criterion(logits, labels)
            opt.zero_grad()
            loss.backward()
            opt.step()
        # End-of-epoch evaluation in softmax (non-binary) mode.
        with torch.no_grad():
            ranker.eval()
            ranker.set_binary(False)
            logits = ranker(all_user_feats, all_item_feats,
                            all_impression_feats)
            print(step, ev.one_stage_eval(logits))
            # Evaluate ranking metrics on validation users.
            logits = ranker(val_user_feats, val_item_feats,
                            val_impression_feats)
            print(step, ev.one_stage_ranking_eval(logits, val_user_list))
            ranker.train()
            ranker.set_binary(True)
    # Checkpoint on CPU for device-agnostic reloads.
    ranker.eval()
    ranker.set_binary(False)
    ranker.to("cpu")
    torch.save(ranker.state_dict(), ranker_path)
    ranker.to(device)
# + colab={"base_uri": "https://localhost:8080/"} id="GcdIHsUETncX" executionInfo={"status": "ok", "timestamp": 1633456341555, "user_tz": -330, "elapsed": 33, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="ca461679-dff9-4f3a-e936-ae4aa5d74b3f"
# Keep only positive-reward samples for nominator training (in-place filter of
# the logged bandit data; r is consumed here and no longer needed afterwards).
u = u[r > 0]
a = a[r > 0]
p = p[r > 0]
batch_size = 128
check_metric = "Precision@10"  # model selection criterion
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
nominator = Nominator().to(device)
nominator.set_binary(False)
opt = torch.optim.Adagrad(
    nominator.parameters(), lr=args.lr, weight_decay=1e-4)
rs = np.random.RandomState(0)
nominator.train()
# + colab={"base_uri": "https://localhost:8080/"} id="O--w0zZqTlPk" executionInfo={"status": "ok", "timestamp": 1633456966028, "user_tz": -330, "elapsed": 622780, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="93cd4783-0faf-400e-da0d-0220295fad36"
# Init results.
best_epoch = 0
best_result = 0.0
val_results, test_results = [], []
# For the two-stage loss, precompute the (frozen) ranker logits once.
if args.loss_type == "loss_2s":
    with torch.no_grad():
        ranker.eval()
        ranker.set_binary(False)
        ranker_logits = ranker(all_user_feats, all_item_feats,
                               all_impression_feats)
for epoch in range(20):
    print("---epoch {}---".format(epoch))
    for step in range(len(u) // batch_size):
        item_ids = torch.LongTensor(
            a[step * batch_size:(step + 1) * batch_size]).to(device)
        item_probs = torch.FloatTensor(
            p[step * batch_size:(step + 1) * batch_size]).to(device)
        user_ids = u[step * batch_size:(step + 1) * batch_size]
        user_feats = {
            key: value[user_ids]
            for key, value in syn.user_feats.items()
        }
        user_feats = syn.to_device(user_feats)
        # NOTE: val_item_feats aliases all_item_feats (full item set), so this
        # scores every item for the batch users — name is misleading here.
        logits = nominator(user_feats, val_item_feats)
        if args.loss_type == "loss_ce":
            loss = loss_ce(logits, item_ids, item_probs)
        elif args.loss_type == "loss_ips":
            # Inverse-propensity-scored loss with weight clipping at 10.
            loss = loss_ips(logits, item_ids, item_probs, upper_limit=10)
        elif args.loss_type == "loss_2s":
            # Gather each batch user's row of precomputed ranker logits.
            batch_ranker_logits = F.embedding(
                torch.LongTensor(user_ids).to(device), ranker_logits)
            loss = loss_2s(
                logits,
                item_ids,
                item_probs,
                batch_ranker_logits,
                upper_limit=10,
                alpha=args.alpha)
        else:
            raise NotImplementedError(
                "{} not supported.".format(args.loss_type))
        opt.zero_grad()
        loss.backward()
        opt.step()
    # End-of-epoch evaluation: one-stage (nominator alone) and two-stage
    # (nominator + ranker), on all users, then val and test user splits.
    with torch.no_grad():
        nominator.eval()
        logits = nominator(all_user_feats, all_item_feats)
        print("1 stage", ev.one_stage_eval(logits))
        print("2 stage", ev.two_stage_eval(logits, ranker))
        # Evaluate ranking metrics on validation users.
        logits = nominator(val_user_feats, val_item_feats)
        one_stage_results = ev.one_stage_ranking_eval(logits, val_user_list)
        print("1 stage (val)", one_stage_results)
        two_stage_results = ev.two_stage_ranking_eval(logits, ranker,
                                                      val_user_list)
        print("2 stage (val)", two_stage_results)
        val_results.append((one_stage_results, two_stage_results))
        # Log best epoch
        if two_stage_results[check_metric] > best_result:
            best_epoch = epoch
            best_result = two_stage_results[check_metric]
        # Evaluate ranking metrics on test users.
        logits = nominator(test_user_feats, test_item_feats)
        one_stage_results = ev.one_stage_ranking_eval(logits, test_user_list)
        print("1 stage (test)", one_stage_results)
        two_stage_results = ev.two_stage_ranking_eval(logits, ranker,
                                                      test_user_list)
        print("2 stage (test)", two_stage_results)
        test_results.append((one_stage_results, two_stage_results))
        nominator.train()
# + colab={"base_uri": "https://localhost:8080/"} id="0Vj_CNjjTgsj" executionInfo={"status": "ok", "timestamp": 1633456966030, "user_tz": -330, "elapsed": 41, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="92c6122d-f8f8-4e0a-f77b-591064629aa9"
# Report metrics from the epoch with the best two-stage validation result.
print("Best validation epoch: {}".format(best_epoch))
print("Best validation stage results\n 1 stage: {}\n 2 stage: {}".format(
    val_results[best_epoch][0], val_results[best_epoch][1]))
print("Best test results\n 1 stage: {}\n 2 stage: {}".format(
    test_results[best_epoch][0], test_results[best_epoch][1]))
# + id="LvpiLglNThxl"
# Persist results keyed by loss type, alpha, and seed (e.g. "2s-a1.0_0.pkl").
# NOTE(review): the file handle from open() is never closed — prefer `with`.
pickle.dump((best_epoch, val_results, test_results),
            open("{}-a{}_{}.pkl".format(
                args.loss_type.split("_")[1], args.alpha, args.seed), "wb"))
# + id="W-UYEG3GqQRs"
# !apt-get install tree
# + colab={"base_uri": "https://localhost:8080/"} id="79ap6knRqT49" executionInfo={"status": "ok", "timestamp": 1633457638495, "user_tz": -330, "elapsed": 511, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="a98836ff-7257-4888-fd08-af56775c5bfa"
# !tree --du -h
# + [markdown] id="Bk4t2SpxogJM"
# ## Experimental results
#
# 
#
# ## Conclusion
#
# - The results of 2-IPS method in both evaluations are better than 1-IPS and cross-entropy.
# - 1-IPS performs better than the cross-entropy method in one-stage evaluation, and performs worse than the cross-entropy method in two-stage evaluation, indicating that only improving the performance of a part of the system may not necessarily improve the performance of the entire system.
| _notebooks/2022-01-24-opl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Array Operations
# import numpy
import numpy as np
# ## Copy
# - Copies array to new memory
# - **Syntax:** `np.copy(array)`
# create an array `A1`
A1 = np.arange(10)
print(A1)
# # copy `A1` into A2 (independent memory; mutating A2 leaves A1 unchanged)
A2 = np.copy(A1)
print(A2)
# ## View
# - Creates view of array elements with type(dtype)
# - **Syntax:** `array.view(np.dtype)`
# view of array A2
# NOTE(review): this reinterprets the raw bytes of the integer array as
# float16 values (no numeric conversion) — the printed values are not the
# original integers; use astype() for value conversion.
A3 = A2.view(np.float16)
print(A3)
# ## Sorting
# - Returns a sorted copy of an array.
# - **Syntax:** `array.sort()`
#   - element-wise sorting(default)
#   - axis = 0; row
#   - axis = 1; column
# 
# Unsorted array
A4 = np.array([9, 2, 3,1, 5, 10])
print(A4)
# Call sort function (in-place; returns None)
A4.sort()
print(A4)
# Row and column unsorted
A5 = np.array([[4, 1, 3], [9, 5, 8]])
print(A5)
A5[0]
A5[0][1]
A5[1]
A5[1][2]
# Apply sort along axis=1 (sorts the values within each row)
A5.sort(axis=1)
print(A5)
# Apply sort along axis=0 (sorts the values within each column)
A5.sort(axis=0)
print(A5)
# ## Flatten: Flattens 2D array to 1D array
#
A6 = np.array([[4, 1, 3], [9, 5, 8]])
A6
# 2D array
A6 = np.array([[4, 1, 3], [9, 5, 8]])
# 1D array (flatten always returns a copy)
A6.flatten()
# ## Transpose: Transposes array (rows become columns and vice versa)
#
A7 = np.array([[4, 1, 3], [9, 5, 8]])
A7
# Transpose A7
A7.T
# ## Reshape: Reshapes arr to `r` rows, `c` columns without changing data
# 
A8 = np.array([(8,9,10),(11,12,13)])
A8
# Reshape 2x3 --> 3x2 (total element count must stay the same)
A8.reshape(3,2)
# ## Resize: Changes arr shape to `rxc` and fills new values with 0
#
A9 = np.array([(8,9,10),(11,12,13)])
A9
# Resize (in-place; zero-fill only happens when the new shape is larger)
A9.resize(3, 2)
A9
np.info(np.resize)
| 03 - Working with NumPy/notebooks/06-Array-Operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bw_recipe_2016 import (
StratosphericOzoneDepletion,
get_biosphere_database,
extract_recipe,
)
# Build the ReCiPe 2016 stratospheric ozone depletion method from the raw
# spreadsheet extraction (sheet index 3) and link it to the biosphere.
data = extract_recipe()[3]
biosphere = get_biosphere_database()
gw = StratosphericOzoneDepletion(data, biosphere)
gw.apply_strategies()
gw.data[0]
gw.write_excel("ozone-depletion")
gw.statistics()
# Compare the flow coverage against the previously released ReCiPe method.
c = gw.compare_to_previous()
[(key, len(c[key])) for key in c]
# Present in previous ReCiPe, missing here
c['reference'].difference(c['found'])
c['found']
gw.drop_unlinked()
gw.write_methods()
gw.data[2]['name']
# # Compare to previous ReCiPe method
# %matplotlib inline
import bw2data, bw2calc
import pyprind
import seaborn as sb
import numpy as np
# Score every ecoinvent activity under both the old and new CFs by swapping
# the characterization matrix while reusing one LCA's inventory machinery.
keys = sorted([x.key for x in bw2data.Database("ecoinvent 3.6 cutoff")])
results_reference, results_2016 = [], []
lca = bw2calc.LCA({keys[0]: 1}, gw.previous_reference)
lca.lci()
lca.lcia()
reference_cm = lca.characterization_matrix.copy()
lca.switch_method(('ReCiPe 2016', 'v1.1 (20180117)', 'ODPinfinite', 'Egalitarian'))
lca.characterization_matrix.sum(), reference_cm.sum()
new_cm = lca.characterization_matrix.copy()
for key in pyprind.prog_bar(keys):
    lca.redo_lci({key: 1})
    results_reference.append((reference_cm * lca.inventory).sum())
    results_2016.append((new_cm * lca.inventory).sum())
# Log-log scatter of old vs. new scores per activity.
sb.scatterplot(np.log10(np.array(results_reference)), np.log10(np.array(results_2016)))
def masked_ratio(a, b):
    """Elementwise a / b restricted to positions where both are nonzero.

    Returns a 1-D array containing the ratios only for indices where
    neither input is zero, avoiding division-by-zero artifacts.
    """
    num = np.array(a)
    den = np.array(b)
    keep = (num != 0) & (den != 0)
    return num[keep] / den[keep]
# Distribution of the new/old score ratio over activities with nonzero scores.
sb.distplot(masked_ratio(results_2016, results_reference))
def abnormal_ratio(a, b):
    """Return |a/b| when it lies outside the band (0.8, 1.25); else None.

    Falsy inputs (zero scores) have no meaningful ratio and yield None.
    """
    if a and b:
        ratio = abs(a / b)
        if not (0.8 < ratio < 1.25):
            return ratio
    return None
# Collect activities whose new/old score ratio deviates more than 25% either
# way, sorted from largest deviation down; inspect the top ten.
exceptions = sorted([
    (abnormal_ratio(results_2016[i], results_reference[i]), bw2data.get_activity(key))
    for i, key in enumerate(keys)
    if abnormal_ratio(results_2016[i], results_reference[i])
], reverse=True)
len(exceptions)
exceptions[:10]
| dev/Stratospheric ozone depletion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
# Basic DataFrame access on the WHO alcohol consumption dataset.
alcohols = pd.read_csv('world_alcohol.csv')
print(alcohols)
alcohols.head()
alcohols.loc[1:4]
alcohols['Year']
alcohols.Year
# # Multi-index
populations = [123, 124, 125, 126, 127, 128]
index = pd.MultiIndex.from_tuples([('China', 2000), ('China', 2001), ('US', 2000), ('US', 2001), ('Canada', 2001), ('Canada', 2004)])
pop = pd.Series(populations, index=index)
pop
df = pd.DataFrame(np.random.rand(4, 2), index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]], columns=['data1', 'data2'])
df
# A dict keyed by tuples also produces a MultiIndex Series.
df = pd.Series({('CA', 2000): 1, ('CA', 2001): 2, ('TX', 2000): 3, ('TX', 2002): 4, ('NYC', 2005): 8})
df
# Three equivalent MultiIndex constructors (NOTE: `df` is reused here for
# index objects, not DataFrames — the name is misleading).
df = pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], [1, 2, 1, 2]])
print(df)
df = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)])
print(df)
df = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
print(df)
data = pd.Series(np.random.rand(4), index=df)
print(data)
# Partial indexing on the outer level.
print(data['a'])
# # concat, join, merge, etc.
df1 = pd.DataFrame({1: 'a', 2: 'b', 3: 'c'}, index=[1, 2, 3])
print(df1)
df2 = pd.DataFrame({1: 'a1', 4: 'd'}, index=[4])
print(df2)
print(pd.concat([df1, df2]))
# NOTE(review): DataFrame.append is deprecated in modern pandas — use concat.
print(df1.append(df2))
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
                    'group': ['Accounting', 'Engineering', 'Platform', 'HR']})
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
                    'hire_year': [2004, 2008, 2012, 2014]})
print(df1)
print(df2)
# merge auto-joins on the shared 'employee' column.
print(pd.merge(df1, df2))
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
                    'group': ['Accounting', 'Engineering', 'Platform', 'HR']})
df2 = pd.DataFrame({'name': ['Lisa', 'Bob', 'Jake', 'Sue'],
                    'hire_year': [2004, 2008, 2012, 2014]})
print(df1)
print(df2)
print(pd.merge(df1, df2, left_on='employee', right_on='name'))
# NOTE(review): set_index returns a NEW frame and the results are discarded
# here, so the index-based merge below still joins on the integer indexes.
df1.set_index('employee')
df2.set_index('name')
print(pd.merge(df1, df2, left_index=True, right_index=True))
print(df1.join(df2))
# # Group by, aggregation, filter, etc.
rng = np.random.RandomState(0)
df = pd.DataFrame({'key': ['a', 'b', 'c', 'a', 'b', 'c'], 'data1': range(6), 'data2': rng.randint(0, 10, 6)}, columns=['key', 'data1', 'data2'])
print(df)
# Multiple aggregations at once; mixed string/function specs are accepted.
df.groupby('key').aggregate(['min', np.median, max])
# Group-wise de-meaning via transform (result aligns with the original rows).
df.groupby('key').transform(lambda x: x - x.mean())
| pandas-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + papermill={"duration": 7.551943, "end_time": "2021-07-31T20:12:27.011687", "exception": false, "start_time": "2021-07-31T20:12:19.459744", "status": "completed"} tags=[] id="a914b977"
import math
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Visuals and CV2
import cv2
# albumentations for augs
import albumentations
from albumentations.pytorch.transforms import ToTensorV2
from sklearn.model_selection import StratifiedKFold, train_test_split
#torch
import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn import functional as F
from torch.utils.data import Dataset,DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim import Adam, lr_scheduler
import transformers
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup,get_cosine_schedule_with_warmup
from transformers import get_cosine_with_hard_restarts_schedule_with_warmup
# + papermill={"duration": 4.555585, "end_time": "2021-07-31T20:12:31.582406", "exception": false, "start_time": "2021-07-31T20:12:27.026821", "status": "completed"} tags=[] colab={"referenced_widgets": ["d915e7d39b95469580ba50f3a4f9d604", "3d135e2a2ca348bab605e59d0ec27ef2", "374b7c0dafc4457e9dd39d541dea8ad2", "aecd998c4a00452c98637e1377f4c62f", "8a59c8d217064894accf4db1b0ae7a4a"]} id="d2e7fcc1" outputId="2fa184b5-3010-4596-c938-c23b6c11b585"
# Global training configuration.
NUM_WORKERS = 4          # DataLoader worker processes
TRAIN_BATCH_SIZE = 64
EPOCHS = 5
SEED = 2020
LR = 5e-5
device = torch.device('cuda')
################################################# MODEL ####################################################################
transformer_model = 'sentence-transformers/paraphrase-mpnet-base-v2'
# transformer_model = 'xlm-roberta-base'
TOKENIZER = transformers.AutoTokenizer.from_pretrained(transformer_model)
################################################ Metric Loss and its params #######################################################
loss_module = 'arcface'#'softmax'
s = 30.0                 # ArcFace feature-norm scale
m = 0.5                  # ArcFace angular margin (radians)
ls_eps = 0.0             # label smoothing
easy_margin = False
############################################################################################################################
# Keyword arguments forwarded to AMZNNet(**model_params).
model_params = {
    'n_classes':9919,
    'model_name':transformer_model,
    'pooling':'clf',
    'use_fc':False,
    'fc_dim':512,
    'dropout':0.0,
    'loss_module':loss_module,
    's':30.0,
    'margin':0.50,
    'ls_eps':0.0,
    'theta_zero':0.785
}
# + papermill={"duration": 0.024208, "end_time": "2021-07-31T20:12:31.623609", "exception": false, "start_time": "2021-07-31T20:12:31.599401", "status": "completed"} tags=[] id="86d0b0dd"
class AverageMeter(object):
    """Track the latest value and the running mean of a streaming metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Last value, running mean, running sum, and sample count.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed over `n` samples and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
# + papermill={"duration": 0.023355, "end_time": "2021-07-31T20:12:31.666432", "exception": false, "start_time": "2021-07-31T20:12:31.643077", "status": "completed"} tags=[] id="71113450"
def fetch_loss():
    """Return the training criterion: standard cross-entropy over logits."""
    return nn.CrossEntropyLoss()
# + papermill={"duration": 0.025538, "end_time": "2021-07-31T20:12:31.709373", "exception": false, "start_time": "2021-07-31T20:12:31.683835", "status": "completed"} tags=[] id="21fef45a"
class AMZNDataset(Dataset):
    """Dataset yielding (input_ids, attention_mask, label) per product title."""

    def __init__(self, csv):
        # Reset the index so positional iloc lookups match __len__.
        self.csv = csv.reset_index()

    def __len__(self):
        return len(self.csv)

    def __getitem__(self, index):
        record = self.csv.iloc[index]
        # Tokenize the title; [0] drops the batch dimension the tokenizer adds.
        encoded = TOKENIZER(record.TITLE, padding='max_length', truncation=True, max_length=32, return_tensors="pt")
        return (encoded['input_ids'][0],
                encoded['attention_mask'][0],
                torch.tensor(record.BROWSE_NODE_ID))
# + papermill={"duration": 0.030995, "end_time": "2021-07-31T20:12:31.757741", "exception": false, "start_time": "2021-07-31T20:12:31.726746", "status": "completed"} tags=[] id="fcf30979"
class ArcMarginProduct(nn.Module):
    r"""Implement of large margin arc distance: :
    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        s: norm of input feature
        m: margin
        cos(theta + m)
    """
    def __init__(self, in_features, out_features, s=30.0, m=0.50, easy_margin=False, ls_eps=0.0):
        super(ArcMarginProduct, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.s = s
        self.m = m
        self.ls_eps = ls_eps  # label smoothing
        # One learnable class-center vector per output class.
        self.weight = Parameter(torch.FloatTensor(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)
        self.easy_margin = easy_margin
        # Precomputed constants for the cos(theta + m) angle-addition formula.
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m

    def forward(self, input, label):
        """Return s-scaled logits with the target-class logit set to cos(theta + m).

        Args:
            input: (batch, in_features) embedding tensor.
            label: (batch,) integer class ids.
        """
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        # Clamp guards against tiny negatives from float error before the sqrt
        # (cosine can be marginally outside [-1, 1] numerically).
        sine = torch.sqrt(torch.clamp(1.0 - torch.pow(cosine, 2), 0.0, 1.0))
        phi = cosine * self.cos_m - sine * self.sin_m  # cos(theta + m)
        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            # Keep the logit monotone when theta + m would pass pi.
            phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        # --------------------------- convert label to one-hot ---------------------------
        # Fix: allocate on the input's device; the original hard-coded 'cuda',
        # which crashed on CPU-only execution.
        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        if self.ls_eps > 0:
            # Label smoothing.
            one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features
        # Margin-adjusted logit for the target class, plain cosine elsewhere.
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        output *= self.s
        return output
# + papermill={"duration": 0.03002, "end_time": "2021-07-31T20:12:31.803677", "exception": false, "start_time": "2021-07-31T20:12:31.773657", "status": "completed"} tags=[] id="84fab4f2"
class AMZNNet(nn.Module):
    """Transformer encoder with optional FC head feeding an ArcFace or softmax classifier."""

    def __init__(self,
                 n_classes,
                 model_name='bert-base-uncased',
                 pooling='mean_pooling',
                 use_fc=False,
                 fc_dim=512,
                 dropout=0.0,
                 loss_module='softmax',
                 s=30.0,
                 margin=0.50,
                 ls_eps=0.0,
                 theta_zero=0.785):
        """
        :param n_classes:
        :param model_name: name of model from pretrainedmodels
        e.g. resnet50, resnext101_32x4d, pnasnet5large
        :param pooling: One of ('SPoC', 'MAC', 'RMAC', 'GeM', 'Rpool', 'Flatten', 'CompactBilinearPooling')
        :param loss_module: One of ('arcface', 'cosface', 'softmax')
        """
        super(AMZNNet, self).__init__()
        # Fix: honor the model_name argument; the original ignored it and read
        # the module-level `transformer_model` global instead.
        self.transformer = transformers.AutoModel.from_pretrained(model_name)
        final_in_features = self.transformer.config.hidden_size
        self.pooling = pooling
        self.use_fc = use_fc
        if use_fc:
            # Optional projection head: dropout -> linear -> batch norm -> relu.
            self.dropout = nn.Dropout(p=dropout)
            self.fc = nn.Linear(final_in_features, fc_dim)
            self.bn = nn.BatchNorm1d(fc_dim)
            self.relu = nn.ReLU()
            self._init_params()
            final_in_features = fc_dim
        self.loss_module = loss_module
        if loss_module == 'arcface':
            self.final = ArcMarginProduct(final_in_features, n_classes,
                                          s=s, m=margin, easy_margin=False, ls_eps=ls_eps)
        else:
            self.final = nn.Linear(final_in_features, n_classes)

    def _init_params(self):
        # Xavier init for the projection, identity-like affine for batch norm.
        nn.init.xavier_normal_(self.fc.weight)
        nn.init.constant_(self.fc.bias, 0)
        nn.init.constant_(self.bn.weight, 1)
        nn.init.constant_(self.bn.bias, 0)

    def forward(self, input_ids, attention_mask, label):
        """Return classification logits; `label` is consumed by the ArcFace head."""
        feature = self.extract_feat(input_ids, attention_mask)
        if self.loss_module == 'arcface':
            logits = self.final(feature, label)
        else:
            logits = self.final(feature)
        return logits

    def extract_feat(self, input_ids, attention_mask):
        """Encode tokens and pool via the first ([CLS]) token embedding."""
        x = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
        features = x[0]
        features = features[:, 0, :]
        if self.use_fc:
            features = self.dropout(features)
            features = self.fc(features)
            features = self.bn(features)
            features = self.relu(features)
        return features
# + papermill={"duration": 0.026026, "end_time": "2021-07-31T20:12:31.845490", "exception": false, "start_time": "2021-07-31T20:12:31.819464", "status": "completed"} tags=[] id="8f52a49f"
def train_fn(dataloader,model,criterion,optimizer,device,scheduler,epoch):
    """Run one training epoch.

    Each batch is (input_ids, attention_mask, targets); returns an
    AverageMeter holding the sample-weighted mean training loss.
    """
    model.train()
    loss_score = AverageMeter()
    tk0 = tqdm(enumerate(dataloader), total=len(dataloader))
    for bi,d in tk0:
        batch_size = d[0].shape[0]
        input_ids = d[0]
        attention_mask = d[1]
        targets = d[2]
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        # The model also consumes targets (ArcFace head needs the labels).
        output = model(input_ids,attention_mask,targets)
        loss = criterion(output,targets)
        loss.backward()
        optimizer.step()
        loss_score.update(loss.detach().item(), batch_size)
        tk0.set_postfix(Train_Loss=loss_score.avg,Epoch=epoch,LR=optimizer.param_groups[0]['lr'])
        # Per-step (not per-epoch) LR schedule, matching the warmup schedule
        # configured in step units.
        if scheduler is not None:
            scheduler.step()
    return loss_score
# + papermill={"duration": 0.025092, "end_time": "2021-07-31T20:12:31.886488", "exception": false, "start_time": "2021-07-31T20:12:31.861396", "status": "completed"} tags=[] id="6e1b7fb6"
@torch.no_grad()
def valid_fn(dataloader,model,criterion,device):
    """Evaluate the model on `dataloader`; returns an AverageMeter with the
    sample-weighted mean validation loss (no gradients computed)."""
    model.eval()
    loss_score = AverageMeter()
    tk0 = tqdm(enumerate(dataloader), total=len(dataloader))
    for bi,d in tk0:
        batch_size = d[0].shape[0]
        input_ids = d[0]
        attention_mask = d[1]
        targets = d[2]
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        targets = targets.to(device)
        # Targets are passed through because the ArcFace head requires labels.
        output = model(input_ids,attention_mask,targets)
        loss = criterion(output,targets)
        loss_score.update(loss.detach().item(), batch_size)
        tk0.set_postfix(Valid_Loss=loss_score.avg)
    return loss_score
# + papermill={"duration": 0.023083, "end_time": "2021-07-31T20:12:31.925373", "exception": false, "start_time": "2021-07-31T20:12:31.902290", "status": "completed"} tags=[] id="0d6e2707"
import csv
# + papermill={"duration": 63.067583, "end_time": "2021-07-31T20:13:35.008866", "exception": false, "start_time": "2021-07-31T20:12:31.941283", "status": "completed"} tags=[] id="3b63520d"
# QUOTE_NONE + escapechar because titles contain unescaped quote characters.
data = pd.read_csv('../input/amazon-ml-challenge-2021-hackerearth/train.csv', escapechar = "\\", quoting = csv.QUOTE_NONE)
# + papermill={"duration": 0.740892, "end_time": "2021-07-31T20:13:35.766474", "exception": false, "start_time": "2021-07-31T20:13:35.025582", "status": "completed"} tags=[] id="06d5155c"
data = data[data.TITLE.notna()]
# data = data.head(1000)
# + papermill={"duration": 0.023487, "end_time": "2021-07-31T20:13:35.806445", "exception": false, "start_time": "2021-07-31T20:13:35.782958", "status": "completed"} tags=[] id="d946a39b"
data.reset_index(drop=True, inplace=True)
# + papermill={"duration": 27.989261, "end_time": "2021-07-31T20:14:03.811910", "exception": false, "start_time": "2021-07-31T20:13:35.822649", "status": "completed"} tags=[] id="1888e826" outputId="83d30e8d-8b0f-431f-893a-b8af7d6ff35d"
# Fit the label encoder on the FULL label column so later transforms of any
# subset see a known class vocabulary.
encoder = LabelEncoder()
encoder.fit(data['BROWSE_NODE_ID'])
# SPLIT 1: Divide into 2 parts of 15 Lakh each
skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
for fold_no, (t, v) in enumerate(skf.split(data['BROWSE_NODE_ID'], data['BROWSE_NODE_ID'])):
    data.loc[v, 'kfold'] = fold_no
data = data[data.kfold == 0]
data.reset_index(drop=True, inplace=True)
# SPLIT 2: Divide into 13.5 Training and 1.5 Validation
skf2 = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
for fold_no, (t, v) in enumerate(skf2.split(data['BROWSE_NODE_ID'], data['BROWSE_NODE_ID'])):
    data.loc[v, 'kfold'] = fold_no
valid_data = data[data.kfold == 0]
train_data = data[data.kfold != 0]
# + papermill={"duration": 0.025534, "end_time": "2021-07-31T20:14:03.854962", "exception": false, "start_time": "2021-07-31T20:14:03.829428", "status": "completed"} tags=[] id="213802d6" outputId="5c9d5dd1-4065-41cb-9ac3-bda65e7656d3"
train_data.shape
# + papermill={"duration": 0.219782, "end_time": "2021-07-31T20:14:04.130634", "exception": false, "start_time": "2021-07-31T20:14:03.910852", "status": "completed"} tags=[] id="c38cd3e0" outputId="744152c7-23d6-4da5-abde-2d5fe1ba79e1"
# Fix: reuse the LabelEncoder already fitted on the full BROWSE_NODE_ID
# column above. The original re-instantiated a fresh LabelEncoder here and
# then called transform() without fit(), which raises NotFittedError.
train_data['BROWSE_NODE_ID'] = encoder.transform(train_data['BROWSE_NODE_ID'])
valid_data['BROWSE_NODE_ID'] = encoder.transform(valid_data['BROWSE_NODE_ID'])
# + papermill={"duration": 0.03088, "end_time": "2021-07-31T20:14:04.179552", "exception": false, "start_time": "2021-07-31T20:14:04.148672", "status": "completed"} tags=[] id="63f01488"
def run():
    """End-to-end training: build loaders, model, optimizer, and scheduler,
    train for EPOCHS epochs, and checkpoint on best validation loss."""
    # Defining DataSet
    train_dataset = AMZNDataset(
        csv=train_data
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=TRAIN_BATCH_SIZE,
        pin_memory=True,
        drop_last=True,
        num_workers=NUM_WORKERS
    )
    valid_dataset = AMZNDataset(
        csv=valid_data
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=TRAIN_BATCH_SIZE,
        pin_memory=True,
        drop_last=True,
        num_workers=NUM_WORKERS
    )
    # Defining Device
    device = torch.device("cuda")
    # Defining Model for specific fold
    model = AMZNNet(**model_params)
    model.to(device)
    # Defining criterion
    criterion = fetch_loss()
    criterion.to(device)
    # Defining Optimizer with weight decay to params other than bias and layer norms
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0001},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_parameters, lr=LR)
    # Defining LR scheduler: linear decay with ~2 epochs of warmup (in steps).
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=len(train_loader)*2,
        num_training_steps=len(train_loader)*EPOCHS
    )
    # THE ENGINE LOOP
    best_loss = 10000
    for epoch in range(EPOCHS):
        train_loss = train_fn(train_loader, model,criterion, optimizer, device,scheduler=scheduler,epoch=epoch)
        valid_loss = valid_fn(valid_loader,model,criterion,device)
        # Keep only the checkpoint with the lowest validation loss.
        if valid_loss.avg < best_loss:
            best_loss = valid_loss.avg
            torch.save(model.state_dict(),f'sentence_transfomer_xlm_best_loss_num_epochs_{EPOCHS}_{loss_module}.bin')
# + papermill={"duration": 23910.429, "end_time": "2021-08-01T02:52:34.625652", "exception": false, "start_time": "2021-07-31T20:14:04.196652", "status": "completed"} tags=[] colab={"referenced_widgets": ["7ecc241491724cdc8e4c5b1821910115"]} id="40a24559" outputId="0490baaa-fac6-4f5c-e524-e58ce329d6e0"
# Kick off the full training run (several hours on GPU per the cell timings).
run()
# + papermill={"duration": 62.48496, "end_time": "2021-08-01T02:54:37.856554", "exception": false, "start_time": "2021-08-01T02:53:35.371594", "status": "completed"} tags=[] id="c64cbe6d"
| scripts/sentence transformer training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''py39'': conda)'
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import xarray as xr
import os
from utils import mrd
import importlib
import dask
import dask.dataframe as dd
from dask.distributed import Client, progress
# +
# Import and subset day of ES4 data (DOY 282)
base_dir = "C:/Users/moo90/Box/data/materhorn/raw_data/ES/ES4/raw_20hz/"
# NOTE(review): skiprows=[0,2,3] presumably drops logger header/units rows
# (TOA5-style file) — verify against the raw file; first column is parsed
# as the timestamp index, "NAN" strings become NaN.
data = pd.read_csv(os.path.join(base_dir, "DPG-UoU_ES4_3063_20Hz_FluxTower_20121007193013_20121009184326.txt"),
                   skiprows=[0,2,3], header=0, na_values=["NAN"], parse_dates=[0], index_col=[0])
# -
# Pick up local edits to the mrd module without restarting the kernel.
importlib.reload(mrd)
# +
#### Old, multi-index way (ran into issues when reading back data)
def interp_func(df, limit=5):
    """Time-weighted interpolation, filling gaps of at most `limit` NaNs.

    Requires a DatetimeIndex (pandas `method='time'`); returns a new frame.
    """
    filled = df.interpolate(method='time', limit=limit)
    return filled
#data_282 = data.loc['2012-10-08'].map_partitions(interp_func)
# Create height multiindex
# NOTE(review): data_282 is undefined here — its assignment above is
# commented out, so this cell raises NameError as written. Also,
# map_partitions is a dask API; `data` was read with pandas — confirm which
# backend is intended before re-enabling.
# Splits column names like "Ux_2.5" into a ('Ux', 2.5) (var, height) pair.
data_282.columns = pd.MultiIndex.from_tuples([('_'.join(col.split('_')[:-1]),
float(col.split('_')[-1])) for col in data_282.columns],
names=['var','height'])
def doy_mrd(data, num_hrs, doy, pair_dict, dt=.05):
    """
    Create multiindex df of mrd data from day of year.

    Parameters
    ----------
    data : DataFrame with (var, height) MultiIndex columns and a DatetimeIndex.
    num_hrs : record length in hours used to size the MRD output.
        NOTE(review): the caller passes 72000, which looks like a sample
        count (1 hr at 20 Hz), not hours — confirm intended units.
    doy : day of year (currently unused inside this function).
    pair_dict : mapping of pair name -> [var1, var2] to decompose.
    dt : sample interval in seconds (0.05 s = 20 Hz).

    Returns
    -------
    DataFrame indexed by hour (0-23) with (pair, height, {'t','d','d_accum'})
    MultiIndex columns: MRD timescales, decomposition values, cumulative sum.
    """
    # NOTE: need to fix timescales on here
    heights = data['Ux'].columns.values
    n_heights = heights.shape[0]  # NOTE(review): unused
    mrd_len = mrd.find_pow2(int(num_hrs * (1/dt) * 3600))  # NOTE(review): only used by the unused index below
    mrd_multiindex = pd.MultiIndex.from_product([list(pair_dict), heights.tolist(), ['t', 'd', 'd_accum']], names=['var','height','values'])
    hourly_multiindex = pd.MultiIndex.from_product([np.arange(24), np.arange(mrd_len)], names=['hour', 'mrd_index'])  # NOTE(review): unused
    mrd_df = pd.DataFrame(index=np.arange(24), columns=mrd_multiindex)
    for p in pair_dict.keys():
        print(p)
        for i,h in enumerate(heights):
            # All variables observed at this measurement height.
            temp_data = data.loc[:,(slice(None),h)]
            # Calculate MRDs for each hour
            for hour in np.arange(24):
                temp_hr_data = temp_data.loc[temp_data.index.hour == hour].interpolate()
                temp_T, temp_d = mrd.mrd_numba(temp_hr_data[pair_dict[p][0]].values.flatten(), temp_hr_data[pair_dict[p][1]].values.flatten())
                mrd_df.loc[(hour), (p, h, 't')] = temp_T * dt  # scale index counts to seconds
                mrd_df.loc[(hour), (p, h, 'd')] = temp_d
                mrd_df.loc[(hour), (p, h, 'd_accum')] = np.cumsum(temp_d)
    return mrd_df
# +
## New, shiny way of starting out with xarray format and then writing out MRDs to netcdf
def interp_func(df, limit=5):
    """Interpolate short gaps (at most `limit` consecutive samples) using the time index."""
    method = 'time'
    return df.interpolate(method=method, limit=limit)
def ts_csv_to_xarray(df):
    """Convert a 20 Hz time-series DataFrame into an xarray Dataset.

    NOTE(review): the original body was left unfinished (`mrd_ds =` with no
    right-hand side — a syntax error that breaks the whole module). Stubbed
    out explicitly so the file imports; implement the conversion before use.
    """
    raise NotImplementedError("ts_csv_to_xarray is not implemented yet")
#data_282 = data.loc['2012-10-08'].map_partitions(interp_func)
# Create height multiindex
# Duplicate of the earlier cell: split '<var>_<height>' column names into a
# (var, height) MultiIndex. NOTE(review): data_282 must exist beforehand.
data_282.columns = pd.MultiIndex.from_tuples([('_'.join(col.split('_')[:-1]),
                                             float(col.split('_')[-1])) for col in data_282.columns],
                                             names=['var','height'])
def doy_mrd(data, num_hrs, doy, pair_dict, dt=.05):
    """
    Create multiindex df of mrd data from day of year.

    NOTE(review): byte-for-byte duplicate of the doy_mrd defined earlier in
    this notebook (old multi-index workflow); consider deleting one copy.

    Parameters: see the earlier definition — data has (var, height)
    MultiIndex columns; pair_dict maps pair name -> [var1, var2];
    dt is the sample interval in seconds (0.05 s = 20 Hz).
    Returns a DataFrame indexed by hour with (pair, height,
    {'t','d','d_accum'}) MultiIndex columns.
    """
    # NOTE: need to fix timescales on here
    heights = data['Ux'].columns.values
    n_heights = heights.shape[0]  # NOTE(review): unused
    mrd_len = mrd.find_pow2(int(num_hrs * (1/dt) * 3600))
    mrd_multiindex = pd.MultiIndex.from_product([list(pair_dict), heights.tolist(), ['t', 'd', 'd_accum']], names=['var','height','values'])
    hourly_multiindex = pd.MultiIndex.from_product([np.arange(24), np.arange(mrd_len)], names=['hour', 'mrd_index'])  # NOTE(review): unused
    mrd_df = pd.DataFrame(index=np.arange(24), columns=mrd_multiindex)
    for p in pair_dict.keys():
        print(p)
        for i,h in enumerate(heights):
            temp_data = data.loc[:,(slice(None),h)]
            # Calculate MRDs for each hour
            for hour in np.arange(24):
                temp_hr_data = temp_data.loc[temp_data.index.hour == hour].interpolate()
                temp_T, temp_d = mrd.mrd_numba(temp_hr_data[pair_dict[p][0]].values.flatten(), temp_hr_data[pair_dict[p][1]].values.flatten())
                mrd_df.loc[(hour), (p, h, 't')] = temp_T * dt  # scale index counts to seconds
                mrd_df.loc[(hour), (p, h, 'd')] = temp_d
                mrd_df.loc[(hour), (p, h, 'd_accum')] = np.cumsum(temp_d)
    return mrd_df
# -
# + tags=[]
# dict of name: value pairs
# MRD pairs: velocity (co)variances plus heat-flux pairs with sonic temperature.
mrd_pair_dict = {'uw' : ['Ux', 'Uz'],
                 'uv' : ['Ux', 'Uy'],
                 'uu' : ['Ux', 'Ux'],
                 'vw' : ['Uy', 'Uz'],
                 'vv' : ['Uy', 'Uy'],
                 'ww' : ['Uz', 'Uz'],
                 'utheta' : ['Ux', 'T_Sonic'],
                 'vtheta' : ['Uy', 'T_Sonic'],
                 'wtheta' : ['Uz', 'T_Sonic']
                }
# NOTE(review): doy_mrd's second argument is documented as hours, but 72000
# looks like a sample count (1 hr at 20 Hz = 72000 samples) — confirm units.
doy_test = doy_mrd(data_282, 72000, 282, mrd_pair_dict)
doy_test
# Runtime: 3798.5 sec
# -
doy_test.to_csv('mrd_doy_282_numba.csv')
# +
# Hourly skeleton frames for the test days (24 one-hour bins -> 25 edges).
# BUG FIX: pd.date_range's third positional argument is `periods` (an int);
# passing '1H' there raises TypeError. '1H' was meant as the frequency.
test_day_282_1hr = pd.DataFrame(index=pd.date_range('2012-10-08T00:00', '2012-10-09T00:00', freq='1H'))
# NOTE(review): the name says 2hr but the original also used '1H' here — confirm.
test_day_282_2hr = pd.DataFrame(index=pd.date_range('2012-10-08T00:00', '2012-10-09T00:00', freq='1H'))
# NOTE(review): the name says DOY 279 but the dates are DOY 282's — confirm.
test_day_279_1hr = pd.DataFrame(index=pd.date_range('2012-10-08T00:00', '2012-10-09T00:00', freq='1H'))
# -
asdf = pd.MultiIndex.from_product([pd.date_range('2012-10-08T00:00', '2012-10-09T00:00', freq='1H')])
# Pair name -> [first series, second series] used for each MRD decomposition.
mrd_pair_dict = dict(
    uw=['Ux', 'Uz'],
    uv=['Ux', 'Uy'],
    uu=['Ux', 'Ux'],
    vw=['Uy', 'Uz'],
    vv=['Uy', 'Uy'],
    ww=['Uz', 'Uz'],
    utheta=['Ux', 'T_Sonic'],
    vtheta=['Uy', 'T_Sonic'],
    wtheta=['Uz', 'T_Sonic'],
)
# + tags=[]
# AFTERNOON
# Heat-flux MRDs only, plotted for hours 19-22.
# NOTE(review): confirm whether hours are local or UTC — 'afternoon' suggests local.
mrd_pair_dict = {'utheta' : ['Ux', 'T_Sonic'],
                 'vtheta' : ['Uy', 'T_Sonic'],
                 'wtheta' : ['Uz', 'T_Sonic']
                }
# One figure per variable pair, one panel per measurement height
# (assumes at most 6 heights for the 2x3 grid — TODO confirm).
for k,v in mrd_pair_dict.items():
    print(k)
    n_heights = data_282['Ux'].columns.values.shape[0]  # NOTE(review): unused
    fig, axes = plt.subplots(ncols=3, nrows=2, **{'figsize':(10,4), 'dpi':300})
    axes = axes.flatten()
    for i,height in enumerate(data_282['Ux'].columns.values):
        ax = axes[i]
        temp_data = data_282.loc[:,(slice(None),height)]
        # Calculate MRDs for each hour
        for hour in np.arange(19, 23):
            temp_hr_data = temp_data[temp_data.index.hour == hour].interpolate()
            temp_T, temp_d = mrd.mrd(temp_hr_data[v[0]].values, temp_hr_data[v[1]].values)
            ax.plot(temp_T*.05, temp_d, label=f"Hour={hour}")  # 0.05 s = 20 Hz sample interval
            ax.set_title(f"Height: {height}", fontsize='small')
            ax.legend(fontsize='xx-small')
            ax.grid(ls='--', alpha=.7)
            ax.set_xscale('log')
            ax.set_xlim(1e-1, 1e4)
            ax.set_xlabel('Timescale [s]', fontsize='x-small')
            ax.tick_params(axis='both', which='major', labelsize='x-small')
    fig.tight_layout()
    fig.savefig(f'mrd_test_{k}_afternoon.png')
# + tags=[]
# Morning counterpart of the previous cell: all nine MRD pairs, hours 7-11.
mrd_pair_dict = {'uw' : ['Ux', 'Uz'],
                 'uv' : ['Ux', 'Uy'],
                 'uu' : ['Ux', 'Ux'],
                 'vw' : ['Uy', 'Uz'],
                 'vv' : ['Uy', 'Uy'],
                 'ww' : ['Uz', 'Uz'],
                 'utheta' : ['Ux', 'T_Sonic'],
                 'vtheta' : ['Uy', 'T_Sonic'],
                 'wtheta' : ['Uz', 'T_Sonic']
                }
for k,v in mrd_pair_dict.items():
    print(k)
    n_heights = data_282['Ux'].columns.values.shape[0]  # NOTE(review): unused
    fig, axes = plt.subplots(ncols=3, nrows=2, **{'figsize':(10,4), 'dpi':300})
    axes = axes.flatten()
    for i,height in enumerate(data_282['Ux'].columns.values):
        ax = axes[i]
        temp_data = data_282.loc[:,(slice(None),height)]
        # Calculate MRDs for each hour
        for hour in np.arange(7, 12):
            temp_hr_data = temp_data[temp_data.index.hour == hour].interpolate()
            temp_T, temp_d = mrd.mrd(temp_hr_data[v[0]].values, temp_hr_data[v[1]].values)
            ax.plot(temp_T*.05, temp_d, label=f"Hour={hour}")  # 0.05 s = 20 Hz sample interval
            ax.set_title(f"Height: {height}", fontsize='small')
            ax.legend(fontsize='xx-small')
            ax.grid(ls='--', alpha=.7)
            ax.set_xscale('log')
            ax.set_xlim(1e-1, 1e4)
            ax.set_xlabel('Timescale [s]', fontsize='x-small')
            ax.tick_params(axis='both', which='major', labelsize='x-small')
    fig.tight_layout()
    fig.savefig(f'mrd_test_{k}_morning.png')
# + tags=[]
# Quick availability check for CuPy (GPU arrays); not used elsewhere in this notebook.
import cupy as cp
print(cp.__version__)
# +
## Testing out new way to store in netcdfs
# Initial run -- four days with raw data (at es4)
base_dir = 'C:/Users/moo90/Box/data/materhorn/ES'
n_hours = 1
# Number of MRD output points: floor(log2(samples)) for a 1-hour, 20 Hz record.
n_out_vals = int(np.floor(np.log2(n_hours*72000)))
# Variable pairs to decompose (lower-case t_sonic here vs. T_Sonic earlier —
# NOTE(review): confirm the column naming is consistent across stations).
var_pair_dict = {
    'uu' : ['Ux', 'Ux'],
    'uv' : ['Ux', 'Uy'],
    'uw' : ['Ux', 'Uz'],
    'vv' : ['Uy', 'Uy'],
    'vw' : ['Uy', 'Uz'],
    'ww' : ['Uz', 'Uz'],
    'utheta' : ['Ux', 't_sonic'],
    'vtheta' : ['Uy', 't_sonic'],
    'wtheta' : ['Uz', 't_sonic']
}
coord_dict = {
    'station' : [f'ES{s}' for s in np.arange(1,6)],
    'date' : ['2012-10-01', '2012-10-05', '2012-10-08', '2013-05-25'],
    'hour' : np.arange(24),
    'var_pair' : list(var_pair_dict.keys()),
    'mrd_pt' : np.arange(n_out_vals)
}
# NOTE(review): the data variables below are declared with dims=['mrd_pt'] only,
# so station/date/hour/var_pair become coordinates without matching data
# dimensions — per-(station,date,hour,pair) assignment will not broadcast over
# them. Confirm this layout is intended before filling the Dataset.
data_dict = {
    't' : (['mrd_pt'], np.zeros(n_out_vals)),
    'd' : (['mrd_pt'], np.zeros(n_out_vals)),
    'd_accum' : (['mrd_pt'], np.zeros(n_out_vals))
}
mrd_ds = xr.Dataset(data_dict, coord_dict)
# -
mrd_ds
# +
# Experiment: try writing one MRD vector into the Dataset via .sel().
# NOTE(review): xarray's .sel() returns a new object, so the item assignment
# below does NOT modify mrd_ds — the prints here appear to be probing exactly
# that. Assign through mrd_ds['t'].loc[...] (or build per-slot DataArrays)
# instead.
t_da = xr.DataArray(np.ones(n_out_vals),
                    coords={'station':'ES1', 'date':'2012-10-01', 'hour':2, 'var_pair':'uu', 'mrd_pt':np.arange(16)}, dims=['mrd_pt'])
mrd_ds.sel(station='ES1', date='2012-10-01', hour=2, var_pair='uu')['t'] = t_da
print(mrd_ds.sel(station='ES1', date='2012-10-01', hour=2, var_pair='uu')['t'])
# -
print(t_da)
mrd_ds.sel(station='ES1', date='2012-10-01', hour=2, var_pair='uu')['t']
# +
# Best-effort read of each station/date parquet file; skips missing files.
for s in coord_dict['station']:
    for d in coord_dict['date']:
        # Read in station data
        f_date_str = d.replace('-', '_')
        try:
            in_path = os.path.join(base_dir, s, 'raw_20hz', 'parq', f'{s}_EC_20hz_{f_date_str}.parquet')
            temp_df = pd.read_parquet(in_path)
            # BUG FIX: the original wrapped this in print(print(...)), which
            # printed the message and then an extra 'None' line.
            print(f'Read data for station: {s} date: {d}')
        except Exception:  # narrowed from a bare except; still best-effort per file
            print(f'No data for station: {s} date: {d}')
            continue
        # Run MRD for all hours in that day for all variables
# -
| fluxtopo/mrd_testbed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="aXJg9L8cax39"
# #Ex01
# Trate o erro abaixo usando os blocos try e except.
# + id="UfDQDpMgax3-" colab={"base_uri": "https://localhost:8080/", "height": 183} outputId="f317fe7d-3c8e-4545-fc37-247c147aeb8c"
# This loop fails on purpose: ** is not defined for str ('a' ** 2 raises
# TypeError). Ex01 asks you to handle it with try/except in the next cell.
for i in ['a','b','c']:
    print(i**2)
# + id="yLhS4lUDbHya" colab={"base_uri": "https://localhost:8080/"} outputId="47d7bd34-bfe4-4aaf-8396-a335a2b85c91"
# Ex01 solution: squaring a string raises TypeError, so catch that specific
# exception instead of a bare except (a bare except would also hide unrelated
# bugs such as NameError or KeyboardInterrupt).
try:
    for i in ['a','b','c']:
        print(i**2)
except TypeError:
    print("Erro na funcao matematica. Revise se os dados da lista estao corretos.")
# + [markdown] id="AH31EIVlax4A"
# #Ex02
# Trate o erro abaixo usando os blocos **try** e **except**. Em seguida, use um bloco **finally** para imprimir 'All Done'.
# + id="3QNsPCIxax4B" outputId="3156ebb8-bd9d-48fb-be46-0348e01fbbc2"
# Ex02 prompt cell: intentionally raises ZeroDivisionError (5 / 0). The
# exercise asks you to wrap it in try/except and add a finally that prints
# 'All Done'.
x = 5
y = 0
z = x/y
# + [markdown] id="iCt97VwRax4B"
# #Ex03
# Escreva uma função que solicite um número inteiro e imprima o quadrado dele. Use um loop while com um try, except e else para contabilizar as entradas incorretas.
# + id="joGZS6vYax4C"
def ask(input_fn=input):
    """Ex03: prompt for an integer and print its square.

    Loops until a valid integer is entered, counting invalid attempts with
    try/except/else as the exercise requires.

    Parameters
    ----------
    input_fn : callable, optional
        Prompt function (defaults to builtin input); injectable for testing.

    Returns
    -------
    int
        The square of the entered number.

    Note: the original body was `pass()`, which is a SyntaxError (`pass` is a
    keyword, not a callable); the exercise is implemented here instead.
    """
    erros = 0
    while True:
        try:
            numero = int(input_fn("Digite um numero inteiro: "))
        except ValueError:
            # Count and report each invalid entry, then ask again.
            erros += 1
            print("Entrada invalida. Digite um numero inteiro.")
        else:
            break
    quadrado = numero ** 2
    print(f"O quadrado de {numero} e {quadrado} (entradas incorretas: {erros})")
    return quadrado
# + id="RJ6bHDDQax4D" outputId="c52089ed-7f2a-4daa-f177-b9534140e69f"
# Interactive check of the Ex03 solution (prompts on stdin).
ask()
# + id="R7jRHPRQax4E"
| log-prog-python/Exercicios_aula_08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import scipy.stats as stats
import scipy.special
#graphing
import matplotlib.pyplot as plt
#stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#import testing
import sys
sys.path.append("../")
import vuong_tests5
# -
class OLS_loglike(GenericLikelihoodModel):
    """Plain OLS expressed as a GenericLikelihoodModel, so per-observation
    Gaussian log-likelihoods can be evaluated at arbitrary parameter values."""

    def __init__(self, *args, ols=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.ols = ols

    def loglikeobs(self, params):
        """Per-observation normal log-density of the residuals, with sigma
        fixed at its MLE (root mean squared residual)."""
        residuals = self.endog - np.matmul(self.exog, params)
        sigma_hat = np.sqrt(np.mean(residuals ** 2))
        return stats.norm.logpdf(residuals, loc=0, scale=sigma_hat)
def gen_data(nobs=1000, a=1.00, num_params=4):
    """Simulate y = 1 + a * sum_j(x_j) + e with standard-normal regressors and noise."""
    x = np.random.normal(scale=1., size=(nobs, num_params))
    noise = np.random.normal(loc=0.0, scale=1.0, size=nobs)
    response = 1 + a * x.sum(axis=1) + noise
    return response, x, nobs
# +
def setup_model(yn, xn):
    """Fit OLS of yn on a constant plus xn and return the per-observation
    log-likelihood vector evaluated at the fitted parameters.

    Clean-ups vs. the original: the design matrix is built once instead of
    calling sm.add_constant twice, and the unused local `params1` was removed.
    """
    design = sm.add_constant(xn)
    fitted = sm.OLS(yn, design).fit(disp=False)
    model_deriv = OLS_loglike(yn, design)
    return model_deriv.loglikeobs(fitted.params)
def setup_test(yn, xn):
    """Stack the per-observation log-likelihood vectors of one OLS model per
    regressor column of xn into a (k, n) array.

    Clean-up vs. the original: the accumulator lists grads/hesss/params were
    created but never used, so they were removed.
    """
    lls = [setup_model(yn, xn[:, i]) for i in range(xn.shape[1])]
    return np.array(lls)
def compute_test_stat(lls):
    """Wald statistic for H0: all k models have equal mean log-likelihood.

    lls is a (k, n) array of per-observation log-likelihoods; the statistic
    is asymptotically chi-squared with k-1 degrees of freedom under H0.
    """
    k, n = lls.shape
    # Mean log-likelihood per model, as a column vector.
    theta = lls.mean(axis=1).reshape((k, 1))
    V = np.cov(lls)
    # Contrast matrix: each row compares model j (j>0) against model 0.
    R = np.concatenate((-np.ones((k - 1, 1)), np.identity(k - 1)), axis=1)
    contrast = R @ theta
    middle = np.linalg.inv((R @ V @ R.transpose()) / n)
    return (contrast.transpose() @ middle @ contrast)[0, 0]
# Smoke test: simulate data where all k candidate models are equivalent and
# compute the Wald statistic for equal mean log-likelihoods.
yn,xn,nobs = gen_data(nobs=1000, a=1.0, num_params=4)
lls = setup_test(yn,xn)
compute_test_stat(lls)
# +
def order_lls(lls):
    """Return the rows of `lls` sorted by ascending mean log-likelihood."""
    row_means = lls.mean(axis=1)
    order = row_means.argsort()
    return lls[order]
def recursive_test(lls):
    """Hierarchical Wald test; `lls` rows must already be ordered by mean
    (see order_lls).

    Returns a nested list: [(reject, k)] at this level, followed (when the
    null is rejected and more than two models remain) by the results of the
    two sub-tests that drop the best and the worst model respectively.
    """
    stat = compute_test_stat(lls)
    cutoff = stats.chi2.ppf(.95, lls.shape[0] - 1)
    rejected = stat >= cutoff
    outcome = [(rejected, lls.shape[0])]
    if rejected and lls.shape[0] > 2:
        outcome.append(recursive_test(lls[:-1, :]))
        outcome.append(recursive_test(lls[1:, :]))
    return outcome
def gen_data_new(nobs=1000, a=1.0, num_params=4):
    """Simulate y = 1 + 2*x_0 + a*sum(x_1..x_{k-1}) + e, so the first
    regressor's model is strictly better than the others."""
    x = np.random.normal(scale=1., size=(nobs, num_params))
    noise = np.random.normal(loc=0.0, scale=1.0, size=nobs)
    signal = 1 + 2 * x[:, 0] + a * x[:, 1:].sum(axis=1)
    return signal + noise, x, nobs
def parse_result(result):
    """Return the depth (number of rejection stages) encoded in a
    recursive_test result.

    `result` is [(reject, k)] optionally followed by one or two child result
    lists; the depth is the longest chain of rejections below this node.

    BUG FIX: in the three-element case the original parsed result[1] twice
    and ignored result[2], under-counting whenever only the second branch
    kept rejecting.
    """
    level = 0
    if len(result) == 2:
        level = 1 + parse_result(result[1])
    elif len(result) == 3:
        left = parse_result(result[1])
        right = parse_result(result[2])
        level = 1 + max(left, right)
    return level
def monte_carlo(k=4, trials=10):
    """Empirical distribution of the stage at which the sequential test stops.

    Index j of the returned array is the fraction of trials whose recursive
    test was cut off after j rejection stages.
    """
    #check size of first test...
    stop_counts = np.array([0] * (k - 1))
    # the index in the array refers to the stage when the test gets cut off
    for _ in range(trials):
        yn, xn, nobs = gen_data_new(nobs=500, a=1.0, num_params=k)
        ordered = order_lls(setup_test(yn, xn))
        stop = parse_result(recursive_test(ordered))
        stop_counts[stop] += 1
    return stop_counts / trials
# Stopping-stage distribution over 100 Monte Carlo replications with k=5 models.
print(monte_carlo(trials=100,k=5))
# -
def plot_true2(gen_data,setup_test,trials=500):
    """Simulate the Wald statistic under the null and compare its histogram
    with the chi-squared(k-1) reference (both a simulated sample and the
    exact pdf). Saves the figure to 'k<num_params>.png' and returns the list
    of simulated statistics.

    Parameters
    ----------
    gen_data : zero-argument callable returning (y, x, nobs).
    setup_test : callable mapping (y, x) to a (k, n) log-likelihood array.
    trials : number of Monte Carlo replications.
    """
    llr_stats = []
    num_params=0
    for i in range(trials):
        yn, xn,nobs = gen_data()
        llrs = setup_test(yn,xn)
        llr = compute_test_stat(llrs)
        llr_stats.append(llr)
        num_params = xn.shape[1]
    plt.hist(llr_stats, density=True,bins=15, label="True",alpha=.60)
    #chi sq
    # Simulated chi-squared(k-1) sample: sum of k-1 squared standard normals.
    chi_sq = np.random.normal(loc=0.0, scale=1.0, size=(num_params-1,1000))
    chi_sq = (chi_sq**2).sum(axis=0)
    plt.hist(chi_sq, density=True,bins=15, label=("Chi-sq %s"%(num_params-1)),alpha=.60)
    #draw plot with actual cdf?
    x = np.linspace(stats.chi2.ppf(0.01, num_params-1), stats.chi2.ppf(0.995, num_params-1), 100)
    plt.plot(x, stats.chi2.pdf(x, num_params-1), 'r-', lw=5, alpha=0.6, label='chi2 pdf')
    plt.legend()
    plt.savefig('k' + str(num_params) +'.png',dpi=500)
    plt.show()
    return llr_stats
# # K = 4
# +
# Null-distribution check for k=4 equivalent models; saves the histogram to k4.png.
gen_data_ex = lambda : gen_data(nobs=1000, a=1.0, num_params=4)
llr_stats = plot_true2(gen_data_ex,setup_test)
# -
| multiple_models/sequential_test_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quadratische Diskriminantenanalyse
#
# In dieser Übung werden Sie selbst eine quadratische Diskriminantenanalyse (QDA) implementieren. Zur Erinnerung: Die QDA berechnet $p(x|y)=\frac{p(y|x)*p(x)}{p(y)}$. Die Likelihood $p(y|x)$ wird als normalverteilt angenommen.
# ## Aufgabe 1
# Eine Fischerin benötigt Ihre Hilfe bei der Klassifikation von Fischen. Vor kurzem hat sie folgende Fische gefangen:
#
# | Länge (m) | Art |
# | ------------- |------------- |
# | 1.3 | Barsch |
# | 0.7 | Lachs |
# | 0.62 | Lachs |
# | 0.9 | Lachs |
# | 0.91 | Barsch |
# | 0.31 | Hering |
# | 0.26 | Hering |
#
# * Berechnen Sie die Priors $p(\omega)$ für jede Fischart
# * Berechnen Sie die Parameter $\mu$ und $\sigma^2$ für die Likelihoods $p(x|\omega)$.
# * Die Fischerin fängt einen neuen Fisch mit Länge $x = 0.82 m$. Berechen Sie die Posterior-Wahrscheinlichkeit $p(\omega|x)$ für jede Klasse. Wie wird der Fisch klassifiziert?
#
# ## Aufgabe 2
# Implementieren Sie eine Funktion `priors(classes)`, die für einen Vektor von Klassen-Labels den Prior $p(x)$ für jede Klasse ausgibt.
# Die Eingabe soll ein Array von Klassen sein (z.b. `np.array(["stand","sit","sit","stand"])`). Die Ausgabe soll ein Data Frame mit den Spalten `class` und `prior` sein.
# +
import numpy as np
import pandas as pd
def priors(classes):
    """Prior probability p(class) for every distinct label in `classes`.

    Parameters
    ----------
    classes : np.ndarray of class labels,
        e.g. np.array(["stand", "sit", "sit", "stand"]).

    Returns
    -------
    pd.DataFrame with columns "class" and "prior", where each prior is the
    label's relative frequency (rows sorted by label, as np.unique returns).
    """
    labels, counts = np.unique(classes, return_counts=True)
    return pd.DataFrame({"class": labels, "prior": counts / classes.size})
# Quick check of priors() on a toy label vector (expected: sit 0.6, stand 0.4).
pp = priors(np.array(["stand","sit","sit","sit","stand"]))
print(pp)
np.array(pp["class"])
# -
# ## Aufgabe 3
# Implementieren Sie eine Funktion `likelihood(data)`, die für ein Data Frame, bestehend aus einer Spalte $y$ und einer Spalte $x$, die Likelihood $p(y|x)$ für jede Klasse $x$ mit einer Normalverteilung approximiert, d.h. es soll für jede Klasse ein Mittelwert und eine Varianz ausgegeben werden.
# Die Ausgabe soll also die Spalten `class`, `mean` und `variance` besitzen.
#
# Plotten Sie die Likelihood für jede Klasse.
# +
def likelihood(data):
    """Normal-approximation likelihood parameters p(x|class) per class.

    Parameters
    ----------
    data : pd.DataFrame with columns "x" (feature value) and "class" (label),
        as prepared by the calling cell.

    Returns
    -------
    pd.DataFrame with columns "class", "mean" and "variance" — the Gaussian
    parameters fitted per class. Variance is the sample variance (ddof=1,
    the pandas default); NOTE(review): a strict MLE fit would use ddof=0 —
    confirm which the course expects.
    """
    stats_df = (
        data.groupby("class")["x"]
        .agg(mean="mean", variance="var")
        .reset_index()
    )
    return stats_df[["class", "mean", "variance"]]
# NOTE(review): `arff` is only imported in a later cell
# (`from scipy.io import arff`), so running the notebook top-to-bottom raises
# NameError here — move the import before this cell.
data = arff.loadarff('features1.arff')
df = pd.DataFrame(data[0])
# Use one accelerometer feature plus the label, renamed to the (x, class)
# schema that likelihood() expects.
dat = df.loc[:, ["AccX_mean","class"]]
dat.columns = ["x","class"]
lik = likelihood(dat)
lik
# -
# ## Aufgabe 4
# Implementieren Sie eine Funktion mylda(newdat,lik,priors), die für eine neue Beobachtung `newdat` die wahrscheinlichste Klasse zurückgibt.
#
# Testen Sie Ihre Implementierung auf dem Datensatz `features1.arff`. „Trainieren“ Sie die QDA (d.h. berechnen Sie likelihood und prior), und führen Sie dann für die gleichen Daten eine Klassifikation durch. Wie gut ist die Klassifikation?
# +
from scipy.io import arff
import scipy.stats
def mylda(newdat, lik, prior):
    """QDA classification of a single observation: argmax_c p(x|c) * p(c).

    Parameters
    ----------
    newdat : scalar feature value to classify.
    lik : pd.DataFrame with columns "class", "mean", "variance"
        (output of likelihood()).
    prior : pd.DataFrame with columns "class" and "prior"
        (output of priors()).

    Returns
    -------
    The class label with the highest unnormalized posterior. The shared
    evidence term p(x) is omitted since it does not change the argmax.
    """
    merged = lik.merge(prior, on="class")
    density = scipy.stats.norm.pdf(newdat,
                                   loc=merged["mean"],
                                   scale=np.sqrt(merged["variance"]))
    posterior = density * merged["prior"]
    return merged["class"].iloc[int(np.argmax(posterior))]
| 04-LDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Final Project: A prosperous but noisy city: Noise analysis in New York
# # Table of Content
# # Motivation
# ## What is your dataset?
# The dataset we used for the project is the records of 311 Service Requests, a government hotline from the NYC open data website which reflects the daily life problems of many residents. [Link](https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9) <br>The dataset includes government hotlines' records from 2010 to the present, about a decade, covering all aspects of residents' daily life.
#
# <br>
# New yorkers can complain by visiting NYC's online customer service, text messages, phone calls, skype, etc.NYC 311 dataset covers all aspects of citizen's life in New York, which can be roughly divided into the following categories: Benefit& Support, Business& Consumers, Courts& Law, Culture& Recreation, Education, Employment, Environment, Garbage& Recycling, Government& Elections, Health, Housing& Buildings, Noise, Pets,Pests& Wildlife, Public safety, Records, Sidewalks,Streets& highways, Taxes, Transportation.
# <br>
#
# NYC311's mission is to provide the public with fast, convenient city government services and information, while providing the best customer service. It also helps organizations improve the services they offer, allowing them to focus on their core tasks and manage their workloads effectively. Meanwhile, NYC 311also provides insights into improving city government through accurate and consistent measurement and analysis of service delivery.
#
# <br>
# Moreover,NYC311 is available 24 hours a day, 7 days a week, 365 days a year.
# Not only does NYC311 offer an online translation service in more than 50 languages, but users can call the 311 hotline in more than 175 languages if their language is not included.In addition, people who are deaf, hard of hearing or have language impairment can also complaint with special help such as video relay service (VRS).
#
# <br>We believe there is a lot of information to explore in such a large and data-rich dataset.
# ## Why did you choose this particular dataset?
# However, it was impossible for us to conduct a comprehensive analysis of this incredibly huge dataset, so after preliminary statistics, we chose the category with the most cumulative complaints over the past decade: Noise.
#
# <br>First of all, when it comes to environmental pollution, people may first think of air, soil, water and other aspects, but noise pollution, as an invisible and intangible existence, has the same impact on us that cannot be ignored.As a serious "urban disease", noise pollution has increasingly become the focus of modern urban life. New York, as a prosperous international city, also has such problems.
#
# <br>Moreover, We want to study the noise complaints in New York and analyze them from both spatial perspective and temporal perspective. We hope to learn something about the urban conditions, economic development, residents' living conditions and traffic conditions, etc, in the five boroughs of New York through the noise complaints. Moreover, we wonder whether noise complaints can be used to describe the overall development and brief condition of New York City over a 10-year period.
# ## What was your goal for the end user's experience?
# To begin with, we want to share interesting insights to the readers from noise analysis. The seemingly boring government complaints hotline actually contains many interesting insights, which not only reflect the people's life in New York, but also provide some directions and suggestions for the government to improve the city service.
# Also, via the analysis of noise complaints in NYC, we hope users could understand the characters, living habits, preferences and cultural backgrounds of the residents in the five different boroughs of New York.
# <br>
#
# Further more, we hope that readers can freely access the information they find useful through interactive map and interactive bar by reading the New York stories presented by us, which can not only increase readers' understanding but also make reading more participatory and interesting.
# # Basic stats
# ## Overview of the dataset
import pandas as pd
import numpy as np
# Full 311 export and a pre-reduced 9-column subset with zip codes.
# NOTE(review): the file is named '311-2019-all.csv' but the text describes
# 2010-present data — confirm which export this actually is.
df_origin=pd.read_csv('311-2019-all.csv')
df=pd.read_csv('311-All-Concise-with-IncidentZip.csv')
# The dataset has 22.8M rows and 41 columns with size of 12GB. The dataset is shown as follows.
df_origin.head(10)
# The attributes are shown as follows:
df_origin.columns
# We made a bar chart to show the 15 most frequent complaint type in New York during 2010~2020 to get some inspiration.
# +
import matplotlib.pyplot as plt
# Count complaints per type and plot the 15 most frequent.
complaint_count=df['Complaint Type'].value_counts()
# NOTE(review): this bare expression has no effect in a script (it only
# displays in a notebook), and it slices 20 rows while the chart uses 15.
complaint_count.iloc[0:20]
title='The 15 most frequent complaint type in New York during 2010~2020'
to_display=complaint_count[0:15]
f,p=plt.subplots(figsize=(20,15))
p.bar(to_display.index,to_display.values)
p.tick_params(axis='x',labelrotation=90)
p.tick_params(labelsize=15)
p.set_title(title,fontsize=20)
# -
# From the figure, we found noise is the most reported complaint type, which inspired us to discover more about it. For temporal and spatial analysis of Noise, we think only 9 attributes are relevant and retained.
# The 9 retained attributes used for the temporal/spatial noise analysis.
df.columns
# These attributes are used for different purposes.
# * Created Date\Closed Date: Used for label the time of each cases, serve for temporal analysis. It is stored in String.
# * Complaint Type: Main complaint types. It has 439 different values and provides a foundational classification of each complaint type.
# * Descriptor: For some main types, people may be confused for the names are ambiguous. This is associated to the Complaint Type, and provides further detail on the incident or condition. Descriptor can be seen as a set of sub-type of each Complaint Type. It has 1168 different values.
# * Location Type: Describes the type of location used in the address information. It corresponds to 'Complaint Type' as well as 'Descriptor' so that it can provide more explanation. For example, the location type, Store, corresponds to the complaint type of Noise - Commercial. It helps when the Complaint Type and Descriptor are ambiguous.
# * Incident Zip: Incident location zip code. It describes the zipcode of the block where the incident took place. It contains some irrelevent information and NaN values and the method to handle with is explained in 2.2
# * Borough: Name of the borough where the incident took place. It contains some irrelevent information and NaN values and the method to handle with is explained in 2.2
# * Latitude/Longitude: Coordinates of the incident position.
# ## Data preprocessing and cleaning
# ### Datetime
# Firstly, We adopt Created Data as the time when the incident happened. It has to be transformed to pandas datetime objets so that we can extract the information.
# 311 'Created Date' strings carry an AM/PM marker (12-hour clock).
# BUG FIX: the original format used %H (24-hour) together with %p; strptime
# then ignores the AM/PM flag, so every PM timestamp was parsed 12 hours
# early. %I is the 12-hour-clock directive that pairs with %p.
suitform='%m/%d/%Y %I:%M:%S %p'
df['TransCDatetime']=pd.to_datetime(df['Created Date'],format=suitform)
# Months elapsed since January 2010, for monthly trend analysis.
df['month']=[i.month+(i.year-2010)*12 for i in df['TransCDatetime']]
time_nan=df['TransCDatetime'].isna()
time_nan.sum()
print('The percentage of nan value of for created time is {:10.2f}%'.format(time_nan.sum()/df.shape[0]*100))
# We successfully transformed the format of the datetime, which indicates all the elements are valid; also no NaN value is detected in the attribute.
# ### Complaint type and Descriptor
# For noise analysis, we will have the five following main types. We only focus on the noise types that are in the 50 top complaints type.
# Collect the noise-related categories among the 50 most frequent complaint
# types; matching on 'oise' catches both 'Noise' and lower-case variants.
complaint_count=df['Complaint Type'].value_counts()
TOP_COMPLAINTS=50
cared=complaint_count.iloc[0:TOP_COMPLAINTS].index
Noise_type=[]
for i in cared:
    if 'oise' in i:
        Noise_type.append(i)
Noise_type
# In each main type, we also have subtypes which are shown below.
# +
# Map each noise main type to its sub-frame of complaints, then list the
# Descriptor sub-types observed under each main type.
Noise_summary=dict()
for i in Noise_type:
    temp=df[df['Complaint Type']==i]
    Noise_summary[i]=temp
for i in Noise_type:
    print('The main type is', i)
    subtype=Noise_summary[i]['Descriptor'].unique()
    for j in subtype:
        print('    The subtype is',j)
# In summary, we have 5 maintypes and 36 subtypes, which are considered all main types and subtypes are valid, so that no further cleaning and processing are demanded.
# ### Cleaning Incident Zip and Coordinates
# We created a choropleth map for the distribution of noise cases across different blocks in 2019, by counting the number of cases for each zipcode.
#
# In the first place, the data quality for the ten years (2010~2020) is analyzed.
# Inspect raw zip-code values to spot malformed entries (NaN, letters, dashes).
df['Incident Zip'].unique()
# Two main problems for the attribute Zipcode have been detected:
# * NaN values
# * Zipcode with invalid characters,e.g. alphabet
#
# It is necessary to figure out the the percentage of the valid values. It is calculated as follows.
# verify each item if they have the following problems: nan, invalid character
import re
zipnan=df['Incident Zip'].isna()
zipnan=zipnan.to_numpy()
zipalph=[]
for i in df['Incident Zip']:
    # NOTE(review): `a and b` only flags values containing BOTH a letter AND
    # a dash; entries with just one of the two slip through — confirm intended.
    a=(re.search('[a-zA-Z]', str(i))!=None)
    b=(re.search('[-]', str(i))!=None)
    zipalph.append(a and b)
zipalph=np.array(zipalph)
percentage=zipalph.sum()+zipnan.sum()
print('The percentage of invalid value of the whole dataset is {:10.2f}%'.format(percentage/df.shape[0]*100))
# The percentage of invalid values is 5.79%, which is acceptable because we mainly focus on the overall distribution and trend of some focused features.
#
# However, in the interactive map, we presented the noise distribution in 2019 so that a particular attention should be paid to the data quality for this year.
# Extract the calendar year and subset to 2019 for the interactive map.
df['year']=[i.year for i in df['TransCDatetime']]
df_2019=df[df['year']==2019]
# Re-run the zip-code validity check restricted to the 2019 records.
import re

zipnan1 = df_2019['Incident Zip'].isna().to_numpy()
zipalph1 = []
for z in df_2019['Incident Zip']:
    # Flag entries containing BOTH a letter and a dash (clearly malformed).
    has_alpha = re.search(r'[a-zA-Z]', str(z)) is not None
    has_dash = re.search(r'[-]', str(z)) is not None
    zipalph1.append(has_alpha and has_dash)
# BUG FIX: the original converted `zipalph` (the 2010-2020 array) here
# instead of `zipalph1`, so the 2019 percentage mixed counts from the wrong,
# longer array.
zipalph1 = np.array(zipalph1)
percentage = zipalph1.sum() + zipnan1.sum()
print('The percentage of invalid value for 2019 is {:10.2f}%'.format(percentage/df_2019.shape[0]*100))
# We have seen that it is of better quality compared to the dataset(3.16% of 2019 to 5.79% to 2010~2020), which indicates improvement in data collection by the government.
#
# But we still want to do correction to the invalid values for 2019. K-nearest-neighbours(KNN) is the machine learning algorithm can be adopted for this problem because the zipcode is determined by coordinates of the point. Therefore, the first thing came to our mind is that the probability of invalid coordinate given invalid zipcode because zipcode should be predicted based on coordinates.
#
# Here, outliers in coordinates are detected with boxplot.
# Check whether a NaN Latitude always comes with a NaN Longitude.
a=df_2019['Latitude'].isna() & df_2019['Longitude'].isna()
b=df_2019['Latitude'].isna()
print('Total number of NaN in Latitude is {}'.format(a.sum()))
print('Total number of NaN in Latitude or Longitude is {}'.format(b.sum()))
# The two numbers are equal, which means that if NaN is present in Latitude, it is also NaN in the corresponding Longitude.
# Boxplots to eyeball coordinate outliers, after dropping the NaN rows (~b).
f,p=plt.subplots(1,2,sharex=True,figsize=(20,5))
font=18
#titledict={'x':0.02,'y':0.9}
p[0].set_title('Latitude of noise cases',fontsize=font)
p[0].boxplot(df_2019[~b]['Latitude'])
p[0].tick_params(labelsize=font)
p[1].set_title('Longitude of noise cases',fontsize=font)
p[1].boxplot(df_2019[~b]['Longitude'])
p[1].tick_params(labelsize=font)
# After removing the NaN values, all the cocordinates are in the right range. We considered no other outliers included.
latnan1=b
latnan1=latnan1.to_numpy()
print('The percentage of invalid value of coordinates for 2019 is {:10.2f}%'.format(latnan1.sum()/df_2019.shape[0]*100))
# The percentage of invalid values is 5.31%. And then we are going to calculate the probability of invalid coordinate given invalid zipcode.
# Count rows where the zip code is invalid (missing, and not merely
# letter/hyphen-malformed) AND the coordinates are missing too.
notused = 0
for i in range(df_2019['Incident Zip'].shape[0]):
    # `~` on a NumPy bool element is logical NOT here (zipalph1 is an ndarray);
    # it would NOT be a logical NOT on a plain Python bool.
    if latnan1[i] and zipnan1[i] and ~zipalph1[i]:
        notused += 1
# BUG FIX: the original format string read "...zipcode{:10.2f}%" (missing " is ").
print('The percentage of invalid coordinate given invalid zipcode is {:10.2f}%'.format(notused / percentage * 100))
# This means that for an invalid zip code, it is 99.83% likely that the coordinates are missing too. Therefore KNN will not be effective, and it can also be inferred that when the government did not record the zipcode, they did not record the position of the case either.
# However, in the interactive map, we presented the noise distribution in 2019 so that a particular attention should be paid to the data quality for this year.
#
# Based on the above analysis, we discarded the invalid zipcode values; this will not have a great effect on the analysis result.
# ### Borough
# We create an interactive bar chart displaying the distributions of various noise types in different boroughs.
#
# In the first place, the data quality for the ten years (2010~2020) is analyzed.
# Inspect the distinct borough labels; the only invalid one is the literal
# string 'Unspecified'.
df['Borough'].unique()
unspecified_whole = (df['Borough'] == 'Unspecified')
print('The percentage of invalid value of the whole dataset is {:10.2f}%'.format(unspecified_whole.sum() / df.shape[0] * 100))
# The percentage of invalid values is 5.35%, which is acceptable to discard
# because we mainly focus on the overall distribution and trend of some
# focused features.
# The interactive bar chart presents 2019, so check that year separately.
unspecified_2019 = (df_2019['Borough'] == 'Unspecified')
# BUG FIX: the original message said "of the whole dataset" although this is
# the 2019-only figure.
print('The percentage of invalid value for 2019 is {:10.2f}%'.format(unspecified_2019.sum() / df_2019.shape[0] * 100))
# We have seen that it is of better quality compared to the dataset(0.91% of 2019 to 5.35% to 2010~2020), which indicates improvement in data collection by the government.
# As for our analysis, we discarded the unspecified values; this will not have a great influence on our analysis result.
# ### Summary of the dataset after cleaning and preprocessing
# Because the dataset covers a great number of complaint types, it is necessary to narrow it down to the main ones to obtain the main trends and features of noise in the New York city. After data cleanning and preprocessing, the dataset only contains the necessary attributes for the report. The datasize has 22662415 rows and 10 colomns (of original attributes).
df.head(10)
# # Data analysis
# ## The proportion of noise out of the whole cases.
# Share of all complaints whose type mentions noise; matching on the
# substring 'oise' catches both 'Noise' and 'noise' spellings.
count = sum('oise' in complaint_type for complaint_type in df['Complaint Type'])
print('The percentage of noise out of the whole dataset is {:10.2f}%'.format(count / df.shape[0] * 100))
# ## Sum up main types and sub types.
# Restrict to noise-related complaints (plain substring match, no regex) and
# chart the five most frequent main complaint types.
main_noise = df[df['Complaint Type'].str.contains('oise', regex=False)]
counts = main_noise['Complaint Type'].value_counts().head(5)
plt.figure(figsize=(12, 8))
counts.plot(kind='bar')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title('The sum of each main type (The 5 most frequently)', fontsize=15)
# The most frequent main type is Noise - Residential, which shows that noise
# cases are mostly reported by residents. Below we also plot the 15 most
# frequent subtypes.
# BUG FIX: the original plotted ALL descriptors although the chart title
# (and the narrative above) promise only the top 15.
sub_noise = main_noise['Descriptor'].value_counts().head(15)
plt.figure(figsize=(12, 8))
sub_noise.plot(kind='bar')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title('The sum of each subtype (The 15 most frequently)', fontsize=15)
# ## The proportion of the considered noise cases out of the whole noise cases.
# Share of all noise complaints captured by the five main types kept above.
counts.sum()/count
# ## Plotting the monthly trend of main types
# One bar chart per main noise type, stacked vertically, all sharing the
# same month axis.
f,p=plt.subplots(len(Noise_type),figsize=(60,200))
m=0  # subplot row cursor
# Full month axis (inclusive) and a sparser variant (every 5th month) used
# by the subtype charts further below.
month_range=np.arange(df['month'].min(),df['month'].max()+1)
month_range_scarce=np.arange(df['month'].min(),df['month'].max()+1,5)
for i in Noise_type:
    # Zero-filled counts indexed 0..len(month_range); months with no cases
    # keep a zero bar.
    # NOTE(review): assumes df['month'] holds small non-negative integers
    # that fit inside this index range -- confirm the upstream 'month'
    # encoding.
    monthly=pd.Series(np.zeros(len(month_range)+1),dtype='int32')
    drawn=df[df['Complaint Type']==i]['month'].value_counts()
    print('I am doing ', i)  # progress indicator
    for j in drawn.index:
        monthly.loc[j]=drawn[j]
    p[m].bar(month_range,monthly[month_range])
    p[m].set_title(i,size=60)
    p[m].tick_params(axis='x',labelrotation=90)
    # 20% headroom above the tallest bar.
    p[m].set_ylim(0,1.2*monthly.max(axis=0))
    p[m].tick_params(labelsize=30)
    p[m].set_xticks(month_range)
    m+=1
# We have observed that for the five main crime types, they all show an increasing trend from 2010 to 2020 and seasonal fluctuation.
#
# We can obtain more information if the monthly trend of each subtype is plotted.
# ## Plotting the monthly trend of sub types
# +
# for i in Noise_type:
# m=0
# subtype=Noise_summary[i]['Descriptor'].unique()
# print('Len of subtype',len(subtype))
# f,p=plt.subplots(len(subtype),figsize=(60,200))
# plt.subplots_adjust(hspace = 0.4)
# for j in subtype:
# monthly=pd.Series(np.zeros(len(month_range)+1),dtype='int32')
# drawn=Noise_summary[i][Noise_summary[i]['Descriptor']==j]['month'].value_counts()
# print('I am doing ',i,j)
# for k in drawn.index:
# monthly.loc[k]=drawn[k]
# # print(monthly[month_range])
# p[m].bar(month_range,monthly[month_range])
# p[m].set_title(i+': '+j,size=60)
# p[m].tick_params(axis='x',labelrotation=90)
# p[m].set_ylim(0,1.2*monthly.max(axis=0))
# p[m].tick_params(labelsize=30)
# p[m].set_xticks(month_range_scarce)
# m+=1
m=0  # grid row cursor
n=0  # grid column cursor (0 or 1)
# 18x2 grid: one panel per (main type, descriptor) combination, filled
# left-to-right, top-to-bottom.
# NOTE(review): the 18-row grid overflows (IndexError on p[m]) if the total
# number of descriptors across Noise_type exceeds 36 -- confirm against the
# data.
f,p=plt.subplots(18,2,figsize=(60,100))
for i in Noise_type:
    subtype=Noise_summary[i]['Descriptor'].unique()
#     print('Len of subtype',len(subtype))
#     if len(subtype)%2==1:
#         rows=len(subtype)//2+1
#     else:
#         rows=len(subtype)//2
    plt.subplots_adjust(hspace = 0.4)
    for j in subtype:
        # Same zero-filled monthly counting as the main-type charts above;
        # relies on `month_range`/`month_range_scarce` from that cell.
        monthly=pd.Series(np.zeros(len(month_range)+1),dtype='int32')
        drawn=Noise_summary[i][Noise_summary[i]['Descriptor']==j]['month'].value_counts()
#         print('I am doing ',i,j)
        for k in drawn.index:
            monthly.loc[k]=drawn[k]
#         print(monthly[month_range])
#         print(m,n)
        p[m][n].bar(month_range,monthly[month_range])
        p[m][n].set_title(i+': '+j,size=30)
        p[m][n].tick_params(axis='x',labelrotation=90)
        p[m][n].set_ylim(0,1.2*monthly.max(axis=0))
        p[m][n].tick_params(labelsize=30)
        p[m][n].set_xticks(month_range_scarce)
        # Advance across the row, then wrap to the next grid row.
        n+=1
        if n==2:
            m+=1
            n=0
# -
# After initial analysis, we focus only on the noise subtypes with complete data (available for all of 2010 to 2020). Generally they show a seasonal pattern of more cases in the summer and fewer in the winter. Besides that, we sorted the subtypes into three categories in terms of overall trend.
# * Ascending trend: most of the subtypes are ascending, mostly relevant to human activity, e.g. Loud Music/Party, Loud Talking.
# * Stable: only a few, mostly irrelevant to human activities, e.g. Barking Dog.
# * Descending trend: only one, <NAME>.
# ## Analysis of coordinates distribution
from scipy.stats import gaussian_kde

# Drop rows without coordinates (Latitude NaN implies Longitude NaN, as shown
# earlier) before density estimation.
main_noise = main_noise[~np.isnan(main_noise['Latitude'])]
font = 18

def _hist_and_kde(values, title):
    """Plot a 50-bin histogram (top panel) and a Gaussian KDE curve
    evaluated on the histogram's bin edges (bottom panel)."""
    f, p = plt.subplots(2, 1, figsize=(10, 8))
    f.tight_layout(pad=3.0)
    p[0].hist(values, bins=50, alpha=0.75, edgecolor = 'white', linewidth = 1.2)
    p[0].tick_params(labelsize=font)
    p[0].set_title(title, fontsize=font)
    density = gaussian_kde(values)
    # Counts are unused; the bin edges give the x grid for the KDE curve.
    _, edges = np.histogram(values, bins=50)
    p[1].plot(edges, density(edges))
    p[1].tick_params(labelsize=font)

# The original duplicated this layout inline for each coordinate.
_hist_and_kde(main_noise['Latitude'], 'Histogram and KDE of Latitude')
_hist_and_kde(main_noise['Longitude'], 'Histogram and KDE of Longitude')
# Based on the histogram, we observed how the coordinates are distributed and it fits the territorial shape of New York city.
# ## If relevant, talk about your machine learning.
# For this project, the focus is statistical analysis, visualization and story-telling. No machine learning problems are involved in the analysis, except that we planned to use K-nearest-neighbours to correct the default or invalid values in the attribute 'Incident Zip'. As described in the data cleaning section, it is impossible to implement KNN because in most cases both the coordinates and the zipcode are missing at the same time, while the other attributes are considered irrelevant.
# # Genre
# ## Which tools did you use from each of the 3 categories of Visual Narrative (Figure 7 in Segal & Heer). Why?
# For visual narrative, we chose the interactive slideshow, which we thought would be a good way to balance author-driven and reader-driven stories. There is an overall time narrative structure (e.g., slideshow); however, at some points the user can manipulate the interactive visualizations (the interactive map and interactive bar chart in this project) to see more detailed information, so that readers can better understand the patterns or extract more relevant information. Readers are also able to control the reading progression themselves. For highlighting, we use zooming, so readers can further explore the details that arouse their interest.
# ## Which tools did you use from each of the 3 categories of Narrative Structure (Figure 7 in Segal & Heer) Why?
# Linear ordering is selected by us in order to form a complete story line; hover details and selection are used in the interactive parts. We maintain these can increase the reader's sense of participation and interactivity in reading. In the messaging section, headlines, annotations, an introduction and a summary are used. The headlines give the readers guidance about the specific content of the article, while the annotations help readers get more descriptive information. The introduction plays the role of arousing readers' interest and attracting them to further reading, while the summary concludes the content and stimulates readers' thinking, both of which give readers a complete picture of the whole story.
# # Visualization
# ## Explain the visualizations you've chosen.
# * Interactive choropleth map for the distribution of noise cases across different blocks
# It is an interactive choropleth map which shows not only overall distribution of the reported cases but also detailed information of each block.
#
# The color of one block indicates how many reported noise cases per hectare it has, and readers can easily get a good understanding of the overall distribution with reference to the color bar.
#
# Besides, when you put your mouse on a maker and click it, you will get the zip number, block name and the number of cases per hectare.
# * Distributions of various noise types in different boroughs
# It is an interactive bar that shows the distribution of top ten noise subtypes in the five boroughs of New York.
# We sorted out the top 10 sub-noise types in terms of frequency and calculated the percentage for each borough. The x axis presents the 10 noise types while the y axis illustrates the percentage for each borough. When the mouse is moved onto a bar, it shows the accurate value of the percentage.
# ## Why are they right for the story you want to tell?
# From the interactive choropleth map and bar chart, readers can get a general understanding of the problem as well as detailed information according to their interest. Also, we provide our own story line to tell the readers what we have found and want them to know, and we use necessary supplementary material (images) to help readers better understand. These storylines originate from the phenomena presented in the interactive visualizations. Therefore, we think they are the right tools for the report.
# # Discussion
# ## What went well?
# * There are outliers and invalid values in the dataset, but they constitute quite a small proportion (less than 5%) of the data we are concerned with.
# * All the code works well, and the results fit our general understanding of the problem as well as the relevant information we obtained from the Internet.
# * We also find the right visualization tool to present our ideas.
# ## What could be improved? Why?
# * The interactive choropleth map is divided by blocks of different zipcode. We have observed that the size varies a lot across the blocks. All the data were sorted into some large block, which has resulted in the weakness that people cannot observe the distribution in the large block. We noticed that when we zoom in Manhattan and found some small blocks with high density and then realized that the uneven distribution in the large block was ignored. Heat map can be used to solve this problem but it cannot provide detailed information that we wanted to present to the readers. We consider the interactive
# * Our analysis was conducted by finding the information we thought related to the phenomenon. It has explained something but in some cases we are not able to know if it is the cause. We believe more exploration into some problems are worthy and more information and advanced mathematical tools are demanded.
# * There may be other interesting aspects of the data that deserve to be explored. The heat/hot water problem is the second most frequently reported category, which may also contain some interesting insights. Also, the relationship between different noise types is worth exploring. But we think it is not very relevant to the storyline in the report.
# # Contribution
| Explainer 0514(Song)(v2) .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] azdata_cell_guid="1f608a1d-2436-4b48-80d4-5c4d2f8ca7d0"
# # Dynamics 365 Business Central Trouble Shooting Guide (TSG) - Connectors (PowerBI, PowerApps, LogicApps, or Flow)
#
# This notebook contains Kusto queries that can help getting to the root cause of an issue with usage of any of the connectors from PowerBI, PowerApps, LogicApps, or Flow.
#
# Each section in the notebook contains links to relevant documentation from the performance tuning guide [aka.ms/bcperformance](aka.ms/bcperformance), telemetry documentation in [aka.ms/bctelemetry](aka.ms/bctelemetry), as well as Kusto queries that help dive into a specific area.
#
# NB! The signal used in this notebook is only available in versions 16.3 (or newer) of Business Central, so check the version of your environment if some sections do not return any data.
# + [markdown] azdata_cell_guid="f103fae9-cf6d-40f7-9062-11ce50691046"
# ## 1\. Get setup: Load up Python libraries and connect to Application Insights
#
# First you need to set the notebook Kernel to Python3, load the KQLmagic module (did you install it? Install instructions: https://github.com/microsoft/BCTech/tree/master/samples/AppInsights/TroubleShootingGuides) and connect to Application Insights
# + azdata_cell_guid="5d02aa4b-9e41-474f-b643-2fbd482077af"
# load the KQLmagic module
# %reload_ext Kqlmagic
# + azdata_cell_guid="a253fa8e-6ac2-4722-a00a-1c52aedab4ed" tags=[]
# Connect to the Application Insights API
# %kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>'
# + [markdown] azdata_cell_guid="9ef1220c-d9cc-4552-9297-1428efcafb32"
# ## 2\. Define filters
#
# This workbook is designed for troubleshooting a single environment. Please provide values for aadTenantId and environmentName (or use a config file).
# + azdata_cell_guid="0a0785f7-a85e-4ccf-9020-732e1d4c058a" tags=[]
# Add values for AAD tenant id, environment name, and date range.
# It is possible to leave the value for environment name blank (if you want to analyze across all values of the parameter)
# You can either use configuration file (INI file format) or set filters directly.
# If you specify a config file, then variables set here takes precedence over manually set filter variables
# config file name and directory (full path)
# config file name and directory (full path)
configFile = "c:/tmp/notebook.ini"

# Add AAD tenant id and environment name here
aadTenantId = "MyaaDtenantId"
environmentName = ""
extensionId = "MyExtensionId"

# date filters for the analysis
# use YYYY-MM-DD format for the dates (ISO 8601)
startDate = "2020-11-20"
endDate = "2020-11-24"

# Do not edit this code section: pull overrides from the config file's
# [DEFAULT] section when the file exists and defines any options.
import configparser
config = configparser.ConfigParser()
config.read(configFile)  # silently a no-op when the file is missing
if config.defaults():
    for _option in ("aadTenantId", "environmentName", "extensionId", "startDate", "endDate"):
        if config.has_option('DEFAULT', _option):
            # Option lookup is case-insensitive (configparser lower-cases
            # option names via optionxform), matching the original if-chain.
            globals()[_option] = config['DEFAULT'][_option]

print("Using these parameters for the analysis:")
print("----------------------------------------")
print("aadTenantId " + aadTenantId)
print("environmentName " + environmentName)
print("startDate " + startDate)
print("endDate " + endDate)
# + [markdown] azdata_cell_guid="5f9b698d-8a7e-4757-b27d-02f219d6c589"
# # Analyze connector usage
# Now you can run Kusto queries to look for possible root causes for issues about web services.
#
# Either click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run queries
# + [markdown] azdata_cell_guid="2f9c2d0d-df3c-482b-af58-48416a517117"
# ## Connector web service requests overview
# All connectors use the OData v4 protocol to connect to Business Central.
#
# Web service telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
#
# KQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/MicrosoftConnectorUsage.kql
# + azdata_cell_guid="a9e923e9-1d05-4acf-a230-4c5142bc3582" tags=[]
# %%kql
// Overview: daily number of Business Central OData V4 requests issued by
// Power Platform / Teams connectors, split by connector type (timechart).
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
// include the whole end date: add 24h, then step back one tick
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.category == 'ODataV4'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
// case() evaluates its predicates top-down, so the specific AzureConnector
// patterns must stay before the generic 'Unknown Azure connector' fallback.
| extend httpStatusCode = customDimensions.httpStatusCode
, connector = case(
// ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
// ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
, msUserAgent matches regex "PowerBIConnector/", 'Power BI'
// ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
// ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
// ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
, msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
, msUserAgent has 'AzureConnector', 'Unknown Azure connector'
, 'Unknown connector'
)
| summarize request_count=count() by connector, bin(timestamp, 1d)
| render timechart title= 'Number of connector web service requests by category'
# + azdata_cell_guid="e4e56e1a-ab5d-427a-bc49-747e6ae34a75"
# %%kql
// Distribution of server-side execution time (100 ms buckets) per connector,
// plotted as log10(count) so rare slow requests remain visible.
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.category == 'ODataV4'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
| extend httpStatusCode = customDimensions.httpStatusCode
, connector = case(
// ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
// ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
, msUserAgent matches regex "PowerBIConnector/", 'Power BI'
// ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
// ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
// ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
, msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
, msUserAgent has 'AzureConnector', 'Unknown Azure connector'
, 'Unknown connector'
)
// timespan -> 100ns ticks via toreal(); divide by 10000 to get milliseconds
, executionTimeInMS = toreal(totimespan(customDimensions.serverExecutionTime))/10000 //the datatype for executionTime is timespan
| summarize count() by executionTime_ms = bin(executionTimeInMS, 100), connector
| extend log_count = log10( count_ )
| order by connector, executionTime_ms asc
| render columnchart with (ycolumns = log_count, ytitle='log(count)', series = connector, title= 'Execution time (in milliseconds) of connector web service requests' )
# + [markdown] azdata_cell_guid="2f7e604a-0d02-484e-9bcb-a6aa148d5f0b"
# ## Connector throttling
# If web service requests exceed the operational limits in Business Central, then they will either be rejected (with a HTTP status code 426), or timeout (with a HTTP status code 408).
#
# Operational Limits for Business Central Online:
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#query-limits
#
# Telemetry docs:
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
# + azdata_cell_guid="9ec9b678-7d66-4758-9101-4e9e5025dfcf" tags=[]
# %%kql
// Throttling overview: daily request counts per HTTP status code and
// connector (408 = timeout, 426 = rejected over operational limits).
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.category == 'ODataV4'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
// cast to string here so the status code can be used as a summarize key
| extend httpStatusCode = tostring( customDimensions.httpStatusCode )
, connector = case(
// ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
// ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
, msUserAgent matches regex "PowerBIConnector/", 'Power BI'
// ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
// ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
// ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
, msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
, msUserAgent has 'AzureConnector', 'Unknown Azure connector'
, 'Unknown connector'
)
| summarize count() by bin(timestamp, 1d), httpStatusCode, connector
| render timechart title= 'Number of connector requests by http status code'
# + [markdown] azdata_cell_guid="f6a9d2d3-26b9-4536-b279-d126e5cd5609"
# ## Connector Web service requests (Access denied)
# The user who made the request doesn't have proper permissions. For more information, see
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/web-services-authentication
# * https://docs.microsoft.com/en-us/dynamics365/business-central/ui-define-granular-permissions
#
# Telemetry docs:
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
# + azdata_cell_guid="ef3d4e9f-42bb-4492-bc3b-f88b33dcbdea"
# %%kql
//
// Top 10 endpoint requests with access denied
//
// Filters on HTTP 401 and groups by connector + endpoint + AL object.
// NOTE(review): unlike the 404/408 queries below, this one has no
// "order by number_of_requests desc" before "limit 10", so it returns an
// arbitrary 10 rows rather than the top 10 -- consider adding the sort.
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.category == 'ODataV4'
and customDimensions.httpStatusCode == '401'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
| extend httpStatusCode = customDimensions.httpStatusCode
, connector = case(
// ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
// ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
, msUserAgent matches regex "PowerBIConnector/", 'Power BI'
// ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
// ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
// ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
, msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
, msUserAgent has 'AzureConnector', 'Unknown Azure connector'
, 'Unknown connector'
)
| summarize number_of_requests=count() by connector, endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| limit 10
# + [markdown] azdata_cell_guid="ffc66241-e49a-46c3-953c-edb1e3d1ef75"
# ## Connector web service requests (Not found)
# The given endpoint was not valid
#
# See
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/publish-web-service
#
# Telemetry docs:
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
# + azdata_cell_guid="09e649eb-d8bb-43e8-8f8b-ff07c8cda005"
# %%kql
//
// Top 10 non-valid endpoints called
//
// Filters on HTTP 404 (endpoint not published / wrong name) and lists the
// ten most frequently hit invalid endpoints.
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.category == 'ODataV4'
and customDimensions.httpStatusCode == '404'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
| extend httpStatusCode = customDimensions.httpStatusCode
, connector = case(
// ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
// ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
, msUserAgent matches regex "PowerBIConnector/", 'Power BI'
// ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
// ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
// ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
, msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
, msUserAgent has 'AzureConnector', 'Unknown Azure connector'
, 'Unknown connector'
)
| summarize number_of_requests=count() by connector, endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
# + [markdown] azdata_cell_guid="66ab172d-9d99-4228-98c7-68a4113a91a0"
# ## Connector web service requests (Request timed out)
# The request took longer to complete than the threshold configured for the service
#
# See
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices
#
# Telemetry docs:
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
# + azdata_cell_guid="f29afa7f-6408-4e85-a613-605d9898574d"
# %%kql
//
// Top 10 endpoints that timed out
//
// Filters on HTTP 408 (request exceeded the configured service timeout) and
// lists the ten endpoints that timed out most often.
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.category == 'ODataV4'
and customDimensions.httpStatusCode == '408'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
| extend httpStatusCode = customDimensions.httpStatusCode
, connector = case(
// ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
// ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
, msUserAgent matches regex "PowerBIConnector/", 'Power BI'
// ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
// ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
, msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
// ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
, msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
, msUserAgent has 'AzureConnector', 'Unknown Azure connector'
, 'Unknown connector'
)
| summarize number_of_requests=count() by connector, endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
# + [markdown] azdata_cell_guid="68241327-780a-4766-9e51-b37f90d595dc"
# ## Connector web service requests (Too Many Requests)
# The request exceeded the maximum simultaneous requests allowed on the service.
#
# See
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices
#
# Telemetry docs:
# * https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace
#
# + azdata_cell_guid="2c9888bb-6306-4b67-a545-a40ea5f97f60"
# %%kql
//
// Top 10 endpoints get throttled
//
// NOTE(review): throttled requests are filtered as httpStatusCode '426' here;
// confirm this against the Business Central version in use (newer versions
// report 429 Too Many Requests for throttling).
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
  and timestamp >= todatetime(_startDate)
  and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
  and customDimensions.aadTenantId == _aadTenantId
  and (_environmentName == '' or customDimensions.environmentName == _environmentName )
  and customDimensions.eventId == 'RT0008'
  and customDimensions.httpStatusCode == '426'
  and customDimensions.category == 'ODataV4'
// httpHeaders and httpStatusCode only available from 16.3
| extend httpHeadersTmp = tostring( customDimensions.httpHeaders)
| extend httpHeadersJSON = parse_json(httpHeadersTmp)
| extend msUserAgent = tostring( httpHeadersJSON.['ms-dyn-useragent'] )
// keep only traffic from Microsoft connectors / the Teams link-unfurling bot
| where msUserAgent has 'AzureConnector' or msUserAgent has 'PowerBIConnector' or msUserAgent has 'BusinessCentralLinkUnfurlingTeamsBot'
| extend httpStatusCode = customDimensions.httpStatusCode
, connector = case(
    // ms-dyn-useragent=AzureConnector/1.0 Flow/1.0 DynamicsSmbSaas/1.0.0.0
    msUserAgent matches regex "AzureConnector/(.)+Flow", 'Flow'
    // ["PowerBIConnector/1.0 PowerBI/1.0 Dynamics365BusinessCentral/1.1.5"]
    , msUserAgent matches regex "PowerBIConnector/", 'Power BI'
    // ms-dyn-useragent=AzureConnector/1.0 PowerApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
    , msUserAgent matches regex "AzureConnector/(.)+PowerApps", 'PowerApps'
    // ms-dyn-useragent=AzureConnector/1.0 LogicApps/3.20092.39 DynamicsSmbSaas/1.0.0.0
    , msUserAgent matches regex "AzureConnector/(.)+LogicApps", 'LogicApps'
    // ms-dyn-useragent=BusinessCentralLinkUnfurlingTeamsBot
    , msUserAgent has "BusinessCentralLinkUnfurlingTeamsBot", 'MSTeams'
    , msUserAgent has 'AzureConnector', 'Unknown Azure connector'
    , 'Unknown connector'
)
| summarize number_of_requests=count() by connector, endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
| samples/AppInsights/TroubleShootingGuides/D365BC Troubleshooting Guides (TSG)/content/Microsoft-Connectors-TSG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <hr style="height:2px;">
#
# # Computational quantitative phase imaging from focal stacks
#
# In this tutorial we will use Pycro-Manager to compute 2D quantitative phase images from collected focal stacks, without the need for specialized optics, by using computational imaging. Specifically, we will solve an inverse problem based on the [Transport of Intensity Equation (TIE)](https://en.wikipedia.org/wiki/Transport-of-intensity_equation). There are multiple ways of setting up and solving this inverse problem. In this example we will demonstrate how to solve it using [exponentially-spaced Z-planes and a Gaussian process regression solver](https://www.osapublishing.org/oe/fulltext.cfm?uri=oe-22-9-10661&id=284196).
#
# The inverse problem solving code used in this notebook is a translation of Matlab code that can be found [here](https://drive.google.com/a/berkeley.edu/file/d/0B_HY5ZswCff-cU8zWnFnZ3hIa1k/view?usp=sharing)
#
# <hr style="height:2px;">
#
# ## Part 1: Setup
#
# Create the functions that will be used to solve the inverse problem
# +
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from numpy import fft
from scipy.io import loadmat
# import tensorflow_probability as tfp
from scipy.optimize import fsolve
"""
intensities: defocused intensity stack, np array
z_vec: positions of images
lambda: wavelength
ps: pixel size
zfocus1: focal plane
Nsl: # of samples in Fourier space
"""
def GP_TIE(Ividmeas, z_vec, lambd, ps, zfocus, Nsl=100, eps1=1, eps2=1, reflect=False):
# code expects 2D arrays
if len(z_vec.shape) == 1:
z_vec = z_vec[:, None]
if isinstance(ps, float):
ps = np.array([[ps]])
elif len(ps.shape) == 1:
ps = ps[:, None]
RePhase1 = RunGaussianProcess(
Ividmeas, zfocus, z_vec, lambd, ps, Nsl, eps1, eps2, reflect
)
RePhase1 = RePhase1 / np.mean(Ividmeas)
# print("rephase1: ", RePhase1)
# print("norm: ", np.mean(Ividmeas))
return RePhase1
def RunGaussianProcess(Ividmeas, zfocus, z_vec, lambd, ps, Nsl, eps1, eps2, reflect):
    """Core TIE solver: estimate dI/dz per frequency band with GP regression,
    fuse the bands, then run two regularized Poisson solves to get the phase.

    Returns the recovered (un-normalized) phase image.
    """
    (Nx, Ny, Nz) = Ividmeas.shape
    I0 = Ividmeas[:, :, zfocus]  # in-focus intensity (used to weight gradients below)
    zfocus = z_vec[zfocus]  # NOTE: rebinds zfocus from an index to a z position
    ### Calculate S_c ###
    # why is dz=1
    # dz=1 here: frequencies are normalized per unit sampling step -- TODO confirm
    freqs = CalFrequency(Ividmeas[:, :, 0], lambd, ps, 1)
    max_freq = np.max(freqs)
    max_freq = np.sqrt(max_freq / (lambd / 2))
    # Nsl frequency cutoffs, spaced so that sqrt(cutoff) is linear in band index.
    freq_cutoff = np.linspace(0, 1, Nsl) * max_freq
    freq_cutoff = freq_cutoff ** 2 * lambd / 2
    SigmafStack = np.zeros((Nsl, 1))  # GP signal variance per band
    SigmanStack = np.zeros((Nsl, 1))  # GP noise variance per band
    SigmalStack = np.zeros((Nsl, 1))  # GP length scale per band
    freq_to_sc = np.linspace(1.2, 1.1, Nsl)
    p = Nz / (np.max(z_vec) - np.min(z_vec))  # sampling density along z
    # Figure out GP regression
    # Per band: fix Sigmaf and Sigman, then solve a 1-D transcendental
    # equation for the log length-scale so the kernel cuts off at `sc`.
    for k in range(Nsl):
        Sigman = 10.0 ** -9
        Sigmaf = 1.0
        f1 = freq_cutoff[k]
        sc = f1 * freq_to_sc[k]
        a = sc ** 2 * 2 * np.pi ** 2
        b = np.log((p * (2 * np.pi) ** 0.5) / Sigman)
        def fu2(x):
            # root of a*exp(x) - x/2 - b = 0; x is the log length-scale
            return a * np.exp(x) - 0.5 * x - b
        x = fsolve(fu2, 5)
        Sigmal = np.exp(x)
        SigmafStack[k] = Sigmaf
        SigmanStack[k] = Sigman
        SigmalStack[k] = Sigmal
    # print("SigmafStack: ", SigmafStack)
    # print("SigmanStack: ", SigmanStack)
    # print("SigmalStack: ", SigmalStack)
    dIdzStack = np.zeros((Nx, Ny, Nsl))
    CoeffStack = np.zeros((Nz, Nsl))
    Coeff2Stack = np.zeros((Nz, Nsl))
    # Estimate the axial intensity derivative dI/dz once per band.
    for k in range(Nsl):
        Sigmal = SigmalStack[k]
        Sigman = SigmanStack[k]
        Sigmaf = SigmafStack[k]
        ### GP Regression step
        dIdz, Coeff, Coeff2 = GPRegression(
            Ividmeas, zfocus, z_vec, Sigmaf, Sigmal, Sigman
        )
        # print("dIdz: ", dIdz)
        # Pre-scale by 2*pi/lambda * ps^2 (TIE constant and pixel area).
        dIdzStack[:, :, k] = 2 * np.pi / lambd * ps ** 2 * dIdz
        CoeffStack[:, k] = Coeff
        Coeff2Stack[:, k] = Coeff2
    # Fuse the per-band derivatives in Fourier space.
    dIdzC = CombinePhase(dIdzStack, freq_cutoff, freqs, CoeffStack, Coeff2Stack)
    # print("dIdzStack: ", dIdzStack)
    # print("CoeffStack: ", CoeffStack)
    # print("Coeff2Stack: ", Coeff2Stack)
    ### poisson solver
    # First Poisson solve: auxiliary potential Psi from the fused derivative.
    Del2_Psi_xy = (-2 * np.pi / lambd) * dIdzC
    N = dIdzC.shape[0]
    Psi_xy = poisson_solve(Del2_Psi_xy, ps, eps1, 0, reflect)
    # print("Psi_xy: ", Psi_xy)
    # Divide the gradient field by the (regularized) in-focus intensity,
    # then take its divergence -- standard TIE normalization.
    Grad_Psi_x, Grad_Psi_y = np.gradient(Psi_xy / ps)
    Grad_Psi_x = Grad_Psi_x / (I0 + eps2)
    Grad_Psi_y = Grad_Psi_y / (I0 + eps2)
    # print("Grad_Psi_x: ", Grad_Psi_x.shape)
    grad2x, _ = np.gradient(Grad_Psi_x / ps)
    _, grad2y = np.gradient(Grad_Psi_y / ps)
    Del2_Psi_xy = grad2x + grad2y
    # print("Del2_Psi_xy: ", Del2_Psi_xy.shape)
    # Second Poisson solve (symmetric variant) recovers the phase Phi.
    Phi_xy = poisson_solve(Del2_Psi_xy, ps, eps1, 1, reflect)
    # print("Phi_xy: ", Phi_xy.shape)
    # Remove the DC offset estimated from the image border.
    dcval = (
        np.sum(Phi_xy[:, 0])
        + np.sum(Phi_xy[0, :])
        + np.sum(Phi_xy[N - 1, :])
        + np.sum(Phi_xy[:, N - 1])
    ) / (4 * N)
    RePhase = -1 * (Phi_xy - dcval)
    # print("dIdzC: ", dIdzC.shape)
    # print("Del2_Psi_xy: ", Del2_Psi_xy.shape)
    # print("Phi_xy: ", Phi_xy.shape)
    # print("dcval: ", dcval.shape)
    # print("rephase: ", RePhase.shape)
    return RePhase
def CalFrequency(img, lambd, ps, dz):
(nx, ny) = img.shape
dfx = 1 / nx / ps
dfy = 1 / ny / ps
(Kxdown, Kydown) = np.mgrid[-nx // 2 : nx // 2, -ny // 2 : ny // 2]
Kxdown = Kxdown * dfx
Kydown = Kydown * dfy
freqs = lambd * np.pi * (Kxdown ** 2 + Kydown ** 2)
# normalized for sampling step and GP Regression ?
freqs = freqs * dz / (2 * np.pi)
return freqs
def CombinePhase(dIdzStack, Frq_cutoff, freqs, CoeffStack, Coeff2Stack):
    """Fuse per-band dI/dz estimates into one derivative image.

    Each slice k of `dIdzStack` contributes only the Fourier components whose
    frequency lies in the annulus (Frq_cutoff[k-1], Frq_cutoff[k]].

    CoeffStack / Coeff2Stack are accepted for interface compatibility but are
    not used by this combination step.
    """
    def F(x):
        # centered 2-D FFT
        return fft.ifftshift(fft.fft2(fft.fftshift(x)))

    def Ft(x):
        # centered 2-D inverse FFT
        return fft.ifftshift(fft.ifft2(fft.fftshift(x)))

    Nx, Ny, Nsl = dIdzStack.shape
    dIdzC_fft = np.zeros((Nx, Ny), dtype=complex)
    f0 = 0
    for k in range(Nsl):
        f1 = Frq_cutoff[k]
        # BUG FIX: the original did `Maskf[np.argwhere(cond)] = 1`, which
        # fancy-indexes whole ROWS with the (M, 2) index array instead of
        # selecting individual elements. A boolean mask selects correctly.
        band = (freqs <= f1) & (freqs > f0)
        dIdzC_fft = dIdzC_fft + F(dIdzStack[:, :, k]) * band
        f0 = f1
    return np.real(Ft(dIdzC_fft))
def poisson_solve(func, ps, eps, symm, reflect):
    """Invert the Laplacian of `func` in Fourier space with Tikhonov-style
    regularization `eps` (solves del^2 Psi = func).

    symm: if truthy, use the real-output inverse FFT path (irfft2).
    reflect: if nonzero, mirror-pad the field to 2N x 2N before solving and
    crop back afterwards (reduces wrap-around artifacts).
    """
    N = len(func)
    if reflect != 0:
        # Mirror-pad to enforce reflective boundary conditions.
        N = N * 2
        func = np.hstack([func, np.fliplr(func)])
        func = np.vstack([func, np.flipud(func)])
    wx = 2 * np.pi * np.arange(0, N, 1) / N
    # Centered frequency axis. NOTE(review): `1 - N % 2 / N` parses as
    # 1 - (N%2)/N, so the half-sample shift only applies for odd N --
    # confirm this matches the original Matlab centering convention.
    fx = 1 / (2 * np.pi * ps) * (wx - np.pi * (1 - N % 2 / N))
    [Fx, Fy] = np.meshgrid(fx, fx)
    func_ft = np.fft.fftshift(np.fft.fft2(func))
    # Divide by the Laplacian symbol -4*pi^2*(Fx^2+Fy^2), regularized by eps.
    Psi_ft = func_ft / (-4 * np.pi ** 2 * (Fx ** 2 + Fy ** 2 + eps))
    if symm:
        # Real-valued inverse transform from the non-negative-frequency half.
        Psi_xy = np.fft.irfft2(np.fft.ifftshift(Psi_ft)[:, 0 : N // 2 + 1])
    else:
        # Complex inverse transform (caller may take the real part).
        Psi_xy = np.fft.ifft2(np.fft.ifftshift(Psi_ft))
    if reflect != 0:
        # Crop back to the original (un-padded) field of view.
        N = N // 2
        Psi_xy = np.array(Psi_xy)[:N, :N]
    # print("Psi_ft: ", Psi_ft.shape, "Psi_xy: ", Psi_xy.shape)
    return Psi_xy
def mrdivide(lhs, rhs):
    """Matlab-style right division: return X such that X @ rhs = lhs (lhs / rhs)."""
    rhs_pinv = np.linalg.pinv(rhs)
    return lhs @ rhs_pinv
def GPRegression(Ividmeas, zfocus, z, Sigmaf, Sigmal, Sigman):
    """GP-regress intensity along z and evaluate dI/dz at the focal plane.

    Ividmeas: (Nx, Ny, Nz) intensity stack; z: (Nz, 1) plane positions;
    zfocus: z position (not index) where the derivative is evaluated;
    Sigmaf/Sigmal/Sigman: GP signal variance, length scale, noise variance.

    Returns (dIdz, Coeff, Coeff2): the derivative image plus the regression
    weights for the derivative (Coeff) and the value (Coeff2).
    """
    Nx, Ny, Nz = Ividmeas.shape
    ones = np.ones((Nz, 1))
    # Pairwise z differences between all training planes.
    KZ = ones.dot(z.T) - z.dot(ones.T)
    # print("z: ", z)
    # Squared-exponential kernel over z.
    K = Sigmaf * (np.exp(-1 / 2 / Sigmal * (KZ ** 2)))
    L = np.linalg.cholesky(K + (Sigman * np.eye(Nz))).T  # why multiplying by I
    # (Sigman * I is the standard noise jitter that keeps K positive definite.)
    z2 = zfocus
    # print("zfocus: ", zfocus)
    Nz2 = len(z2)
    ones2 = np.ones((Nz2, 1))
    # Differences between training planes and the query plane(s).
    # NOTE(review): elementwise `*` with column/row vectors broadcasts to the
    # same result as the matrix products used for KZ above when Nz2 == 1.
    KZ2 = ones * (z2.T) - z * (ones2.T)
    # print("KZ2: ", KZ2)
    # print("KZ2 stuff: ", ones, z2, z, ones2)
    # Cross-covariance of the GP *derivative* with the training values.
    D = Sigmaf * (np.exp((-1 / 2 / Sigmal) * (KZ2 ** 2))) / -Sigmal * KZ2
    # print("D: ", D)
    # print("KZ2: ", KZ2)
    # print("sigmaf: ", Sigmaf)
    # print("sigmal: ", Sigmal)
    # return
    Coeff = mrdivide(mrdivide(D.T, L), L.T)[0]  # D.T/L/L.T
    # print("D: ", D)
    # print("L: ", L)
    # print("Coeff: ", Coeff)
    # Cross-covariance of the GP *value* with the training values.
    D2 = Sigmaf * (np.exp((-1 / 2 / Sigmal) * (KZ2 ** 2)))
    Coeff2 = mrdivide(mrdivide(D2.T, L), L.T)  # D2.T/L/L.T
    # dI/dz is a per-pixel weighted sum of the measured planes.
    dIdz = np.zeros((Nx, Ny))
    for k in range(Nz):
        dIdz = dIdz + Ividmeas[:, :, k].dot(Coeff[k])
        # print(k)
        # print(Ividmeas[:,:,k])
        # print(Coeff[k])
        # print(Ividmeas[:,:,k].dot(Coeff[k]))
    # print("dIdz: ", dIdz)
    return dIdz, Coeff, Coeff2
# -
# ## Test the function using simulated data
# Test the functions using simulated data, which can be accessed [here](https://drive.google.com/a/berkeley.edu/file/d/0B_HY5ZswCff-cU8zWnFnZ3hIa1k/view?usp=sharing)
# +
# Load the simulated focal stack and run the full TIE reconstruction on it.
test_path = "phase_rec_GUI/datasets/moustache_man_stack.mat"
data = loadmat(test_path)
Ividmeas = data["Istack"]  # defocused intensity stack
z_vec = np.ravel(data["zvec"])  # plane positions
lambd = data["lambda"][0][0]  # illumination wavelength
ps = data["ps"]  # pixel size
zfocus = 1  # index of the in-focus plane
Nsl = 100
# FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` is the documented replacement.
phase = GP_TIE(Ividmeas.astype(float), np.ravel(z_vec), lambd, ps, zfocus)
# print("phase: ", phase)
plt.imshow(phase)
# plt.hist(np.ravel(phase))
plt.show()
# -
# <hr style="height:2px;">
#
# ## Part 2: Implement as a Pycro-manager image processor
# be sure to turn debug mode off when running on actual hardware. This example will NOT work in non-debug mode with the micro-manager demo config, because an array of all zeros will be output
# +
import copy
import numpy as np
from pycromanager import Acquisition, multi_d_acquisition_events
planes_per_z_stack = 5  # number of images acquired per z-stack
lambd = 6.328e-07  # wavelength of the illumination light, in meters
debug = True  # compute the phase image from test data in the previous cell
# This image processor will run each time an image is acquired. If the image is the last one
# in the z-stack, the inverse problem will be solved, and the result added to the GUI. Otherwise
# the image will be accumulated into a temporary list
def img_process_fn(image, metadata):
    """Pycro-Manager image processor for on-the-fly TIE phase retrieval.

    Accumulates images as a z-stack is acquired; when the last plane of the
    stack arrives, solves the TIE inverse problem and emits the phase image
    as an extra "Phase" channel alongside the original image.
    """
    # accumulate images as they come (state lives on the function object)
    if not hasattr(img_process_fn, "images"):
        img_process_fn.images = []
        img_process_fn.z_positions = []
    # add pixels and z position
    img_process_fn.images.append(image)
    img_process_fn.z_positions.append(metadata["ZPosition_um_Intended"])
    if metadata["Axes"]["z"] == planes_per_z_stack - 1:
        # its the final one in the z stack
        z_positions = np.array(img_process_fn.z_positions)
        # FIX: `np.float` was removed in NumPy 1.24 -- use the builtin float.
        images = np.stack(img_process_fn.images, axis=2).astype(float)
        # the z position that is solved for -- assume this is the median of
        # the z-stack (i.e. its symmetrical)
        solved_plane_index = np.argmin(np.abs(z_positions - np.median(z_positions)))
        if debug:
            # debugging -- send in the test data instead
            phase_img = GP_TIE(
                Ividmeas.astype(float), np.ravel(z_vec), lambd, ps, zfocus
            )
        else:
            phase_img = GP_TIE(
                images,
                z_positions,
                lambd,
                1e-6 * metadata["PixelSizeUm"],  # um -> meters
                solved_plane_index,
            )
        # rescale to 16 bit, since the viewer doesn't accept 32 bit floats
        phase_img = (
            ((phase_img - np.min(phase_img)) / (np.max(phase_img) - np.min(phase_img)))
            * (2 ** 16 - 1)
        ).astype(">u2")
        # create new metadata to go along with this phase image
        phase_image_metadata = copy.deepcopy(metadata)
        # make it appear as a new channel
        phase_image_metadata["Channel"] = "Phase"
        # Put it the z index closest to the solved plane
        phase_image_metadata["Axes"]["z"] = solved_plane_index
        # reset in case multiple z-stacks
        img_process_fn.images = []
        img_process_fn.z_positions = []
        # return the original image and the phase image, in a new channel
        return [(image, metadata), (phase_img, phase_image_metadata)]
    else:
        return image, metadata
# Pre-initialize the processor's accumulation state.
img_process_fn.images = []
img_process_fn.z_positions = []
# Run one acquisition whose images flow through img_process_fn.
with Acquisition(
    directory="/path/to/save", name="acq_name", image_process_fn=img_process_fn
) as acq:
    # Generate the events for a single z-stack
    events = []
    for index, z_um in enumerate(np.linspace(0, 10, planes_per_z_stack)):
        # one event per plane: z axis index plus the physical z position (um)
        evt = {"axes": {"z": index}, "z": z_um}
        events.append(evt)
    acq.acquire(events)
| docs/source/application_notebooks/pycro_manager_tie_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, TensorDataset, Dataset
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
from fastai import *
from fastai.vision import *
import os, gc, random
import time
print(os.listdir("../input"))
import sys
print(sys.modules['fastai'])
from fastai.utils.show_install import *
show_install()
# + _uuid="4475e580ce3b4e20fbfe6da96bbe2750e25d22ac"
# make training deterministic/reproducible
def seed_everything(seed=2018):
    """Seed every RNG in play (python, numpy, torch, CUDA) and make cuDNN
    deterministic so training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.backends.cudnn.deterministic = True

seed_everything()
def to_img(*values):
    """Pack pixel values (ints or numeric strings) into a 1-D uint8 ndarray."""
    return np.array(values, dtype=np.uint8)
def conv2(ni, no, kernel_size=3, stride=2, padding=0):
    """Conv2d (no bias) -> LeakyReLU -> BatchNorm2d building block."""
    layers = [
        nn.Conv2d(ni, no, kernel_size=kernel_size, stride=stride,
                  padding=padding, bias=False),
        nn.LeakyReLU(inplace=True),
        nn.BatchNorm2d(no),
    ]
    return nn.Sequential(*layers)
class Flatten(nn.Module):
    """Collapse every non-batch dimension into a single feature axis."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class ResBlock(nn.Module):
    """Residual block: two conv2 stages with an identity skip connection."""

    def __init__(self, nf):
        super().__init__()
        # Same attribute names as before so state-dict keys are unchanged.
        self.conv1 = conv2(nf, nf, kernel_size=3, stride=1, padding=1)
        self.conv2 = conv2(nf, nf, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        return x + self.conv2(self.conv1(x))
# + _uuid="6be6fe2db0e2cde925f56ee4b6ad47e77750c9be"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
# + _uuid="31be62222f1e591577e6c48a7c82b84b95166980"
class ExImagePoints(ImagePoints):
    """`ImagePoints` subclass whose `reconstruct` rebuilds points without rescaling."""
    # scale=False: `t` is already in the coordinate space expected by display.
    def reconstruct(self, t, x): return ExImagePoints(FlowField(x.size, t), scale=False)
class PointsProcessor(PreProcessor):
    "`PreProcessor` that stores the number of targets for point regression."
    def __init__(self, ds:ItemList):
        # debug prints left by the author
        print('PointsProcessor')
        print(ds)
        # c = number of scalar targets (n_points * 2 for (x, y) pairs)
        self.c = len(ds.items[0].reshape(-1))
    def process(self, ds:ItemList):
        # propagate the target count onto the processed dataset
        ds.c = self.c
class ExPointsLabelList(ItemList):
    "`ItemList` for points."
    _processor = PointsProcessor
    # Flat MSE on the coordinate vector is the default regression loss.
    def __post_init__(self): self.loss_func = MSELossFlat()
    def get(self, i):
        # Wrap the raw coordinates in an ExImagePoints tied to the matching
        # input image's size; scale=True maps pixel coords into [-1, 1].
        o = super().get(i)
        o = torch.tensor(o.astype(np.float32), dtype=torch.float32)
        return ExImagePoints(FlowField(self.x.get(i).size, o), scale=True, y_first=False)
    # Reshape the flat prediction vector back into (n_points, 2) pairs.
    def analyze_pred(self, pred, thresh:float=0.5): return pred.view(-1,2)
class ExPointsItemList(ImageList):
    _label_cls,_square_show_res = ExPointsLabelList,False
    def open(self, fn):
        # `fn` is a space-separated string of pixel values (dataset format),
        # not a file path: decode it into a normalized 1x96x96 float image.
        img = (to_img(*fn.split(' '))/255).reshape(1,96,96)
        return Image(torch.tensor(img, dtype=torch.float32))
# + _uuid="f447b3fea84d2793bded0ccbbc51d0520f1c2c97"
train_df = pd.read_csv('../input/training/training.csv')
test_df = pd.read_csv('../input/test/test.csv')
# + _uuid="7ef097cf980ced1bd42757210e969ffc0a90089b"
# check to missing values
train_df.isna().any().value_counts()
# + _uuid="38b57f9f0a38d958ea7bd7d147c664a6ee87e78d"
print(train_df.shape)
# drop N/As
train_df.dropna(inplace=True)
train_df.shape
# + _uuid="028ac363aa430f2957eadd8382ec63153396b530"
labels = train_df.drop('Image',axis = 1).values.astype(np.float32)
labels = labels.reshape(-1, 15, 2)
# ## convert from (x,y) to (y,x)
# labels = labels[:, :, [1,0]]
labels = torch.tensor(labels, dtype=torch.float32)
# + _uuid="760ebbf3a00d4c93b290997a8fccd616f5b6246b"
valid_pct = 0.2
rand_idx = np.random.permutation(len(train_df))
cut = int(valid_pct * len(train_df))
train_idxs, valid_idxs = rand_idx[cut:],rand_idx[:cut]
train_idxs.shape,valid_idxs.shape
# + _uuid="b2eacebcfde8a0edf4ef9d4505d83d8ce7da4f1f"
data = ExPointsItemList(items=train_df.Image, path='.')
data = data.split_by_idxs(train_idx=train_idxs,valid_idx=valid_idxs)
data = data.label_from_lists(labels[train_idxs],labels[valid_idxs])
# data = data.databunch(bs=16)
# data
# + _uuid="dfffd1dab8bc07de10ff37d16d517f0874aceb5c"
empty_labels = np.zeros((len(test_df.Image), 15, 2))
data = data.add_test(items=test_df.Image, label=empty_labels)
# + _uuid="7e2d51ef99a0897495e89051599ea880e289084d"
tfms = get_transforms(do_flip=True,
flip_vert=False,
max_rotate=0.0,
max_zoom=1.0,
max_lighting=0.2,
max_warp=0.0,
p_affine=0.8,
p_lighting=0.8)
data = data.transform(tfms=tfms)
# + _uuid="15579a73a584a36f8492e69ed3e4e888cf9fa61f"
data = data.databunch(bs=16)
data
# + _uuid="e6fe330409d556b45169098116d2a5df59f91ce8"
data.show_batch(rows=2)
# + _uuid="87e3311de22dfa8761a3be1db84c289dffbc7874"
model = nn.Sequential(
conv2(1, 4, kernel_size=5, stride=1, padding=2),
ResBlock(4),
nn.MaxPool2d(2,2),
conv2(4, 6, kernel_size=5, stride=1, padding=2),
nn.MaxPool2d(2,2),
conv2(6, 8, kernel_size=4, stride=1, padding=2),
nn.MaxPool2d(2,2),
Flatten(),
nn.Dropout(0.5),
nn.Linear(12 * 12 * 8, 250),
nn.Dropout(0.5),
nn.Linear(250, 30),
)
# + _uuid="efca7b019b6c13346c9e62289e7ec18596104dc3"
# (xs,ys) = data.one_batch()
# model(xs).shape
# + _uuid="c63a27b89f2f0d10ba5700e18d5fa10bc05c3868"
def rmse(preds, targets):
    """Root-mean-squared error between predictions and flattened targets."""
    return torch.sqrt(mse(preds, targets))

def mse(preds, targets):
    """Mean-squared error; targets are flattened to (batch, -1) first."""
    flat_targets = targets.view(targets.size(0), -1)
    return nn.functional.mse_loss(preds, flat_targets)
learn = Learner(data, model, loss_func=mse, metrics=rmse)
# + _uuid="a1aec87c4e1a73d1c7247ca1a3c98c2e4ed8d30c"
learn.lr_find(end_lr=100)
# + _uuid="a43328d5cab998aa6dd76938b5f16db0c2785787"
learn.recorder.plot()
# + _uuid="8edb4a1fc8a46a75cf2ca0c0fbbee95d23bd2593"
learn.fit_one_cycle(20, max_lr=1e-2/2)
# + [markdown] _uuid="49aaf1091e1ca47ff38254eb5203573ae3e0c661"
# ## Test set
# + _uuid="42611cee76fdfc9e4be27e85f2a1010086d3606b"
preds,y = learn.get_preds(ds_type=DatasetType.Test)
# + _uuid="b7c7d836fe0f2e61e2872e8566805ab42b071f96"
preds.min(),preds.max()
# + _uuid="e011eae942b35800e36543180606cfcb0ae5ee7a"
preds = preds * 48 + 48
# + _uuid="c3ee4efd44aef77c469c64aa1445b374b9ed35c0"
def show_im(img, pnts):
    """Display a 96x96 image (space-separated pixel string) with its keypoints.

    img: dataset-format pixel string; pnts: flat tensor of 30 coords in
    (y, x) order, denormalized to pixel units.
    """
    img = (to_img(*img.split(' '))/255).reshape(1,96,96)
    img = Image(torch.tensor(img, dtype=torch.float32))
    pnts = pnts.reshape(15, 2)
    pnts = ImagePoints(FlowField(img.size, pnts), y_first=True)
    img.show(y=pnts)
show_im(test_df.Image[1], preds[1])
# show_im(train_df.Image[0], labels[0][:,[1,0]].reshape(-1))
# + _uuid="ba6c22e2204017fee0aade672719404354a020c3"
# (y,x) -> (x,y)
preds = preds.reshape(-1, 15, 2)[:, :, [1,0]]
preds = preds.reshape(-1, 30)
preds = preds.detach().numpy()
# + [markdown] _uuid="8c6597d4408764ba4868b8c4fdc69c17d4aee367"
# ### Submit Result
# + _uuid="7380e0b896bac0cc6b2872b1205ada8045ff06f4"
look_id = pd.read_csv('../input/IdLookupTable.csv')
look_id.drop('Location',axis=1,inplace=True)
# + _uuid="81f8bb66792b4117d9d7c07a21c4cc1a941b6e9b"
columns = train_df.drop('Image', axis=1).columns
# + _uuid="effee91d8ab376a2576694b067e5b43d5efe3069"
ind = np.array(columns)
value = np.array(range(0,30))
maps = pd.Series(value,ind)
# + _uuid="8f98139dfd9cc91b0bbba4b7c172a49ca57308e3"
look_id['location_id'] = look_id.FeatureName.map(maps)
# + _uuid="bca2a016bdd62f98e41432ec82add75d27fcc6f0"
df = look_id.copy()
location = pd.DataFrame({'Location':[]})
for i in range(1,1784):
ind = df[df.ImageId==i].location_id
location = location.append(pd.DataFrame(preds[i-1][list(ind)],columns=['Location']), ignore_index=True)
# + _uuid="50002321a6e58a0453b337fc324167a656d744ed"
look_id['Location']=location
# + _uuid="f6e5cb0e747027be1bfb4af4f78f539f53e72267"
look_id[['RowId','Location']].to_csv('submission.csv',index=False)
# + _uuid="99804dfa393a54611a319daac3a98384765583c6"
# !ls
# + _uuid="dae3cf8088807e286aefb778f7f298dac1ec3ff1"
| facial-keypoints-detection-fastai-1-0-46.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://homepage.divms.uiowa.edu/~luke/xls/glim/glim/node8.html
# + tags=[]
import pandas as pd
import numpy as np
mat = [
[np.nan, 7, 9, 7, 7, 9, 11],
[6, np.nan, 7, 5, 11, 9, 9],
[4, 6, np.nan, 7, 7, 8, 12],
[6, 8, 6, np.nan, 6, 7, 10],
[6, 2, 6, 7, np.nan, 7, 12],
[4, 4, 5, 6, 6, np.nan, 6],
[2, 4, 1, 3, 1, 7, np.nan]
]
teams = ['Milwaukee', 'Detroit', 'Toronto', 'New York', 'Boston', 'Cleveland', 'Baltimore']
df = pd.DataFrame(mat, columns=teams, index=teams)
df
# + tags=[]
wins = list(filter(lambda v: pd.notna(v) and v > 0, np.ravel(np.tril(df))))
# + tags=[]
losses = list(filter(lambda v: pd.notna(v) and v > 0, np.ravel(np.tril(df.T))))
# + tags=[]
# FIX: `product` is used below but was never imported in this notebook --
# without this import the cell raises NameError.
from itertools import product

n_teams = len(teams)
# All ordered (team, team) pairs, reshaped into an n_teams x n_teams grid.
indices = list(product(teams, teams))
indices = [indices[i:i + n_teams] for i in range(0, len(indices), n_teams)]
# np.tril zeroes the upper triangle of the object grid; drop those zeros.
indices = filter(lambda v: v != 0, np.ravel(np.tril(pd.DataFrame(indices))))
# Drop the diagonal (a team paired with itself).
indices = filter(lambda tup: tup[0] != tup[1], indices)
indices = list(indices)
index = pd.MultiIndex.from_tuples(indices, names=['TeamW', 'TeamL'])
# + tags=[]
c2i = {c: i for i, c in enumerate(teams)}
ohe = pd.DataFrame([[1 if i == c2i[c1] else 0 for i in range(n_teams)] for c1, c2 in indices],
columns=teams,
index=index)
ohe
# + tags=[]
wl = pd.DataFrame({'w': wins, 'l': losses}, index=pd.MultiIndex.from_tuples(indices, names=['TeamW', 'TeamL']))
wl['y'] = wl.apply(lambda r: 1 if r.w - r.l > 0 else 0, axis=1)
wl
# + tags=[]
data = wl.join(ohe).drop(columns=['w', 'l', 'Milwaukee'])
data
# + tags=[]
X, y = data[[c for c in data.columns if c != 'y']], data['y']
# + tags=[]
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(penalty='l1', solver='liblinear', fit_intercept=False)
model.fit(X, y)
# + tags=[]
list(zip(X.columns, model.coef_[0]))
# -
| sphinx/datascience/source/nba/test-00.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # callbacks
# The `fastai.callbacks` package collects the callback implementations shipped with fastai: the learning-rate finder, one-cycle scheduling, mixed-precision (fp16) training, MixUp augmentation, model hooks, CSV logging, RNN regularization, and general schedulers. The auto-generated entries below document the names re-exported by the package.
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.callbacks import *
# + hide_input=true
show_doc(F)
# -
# `F`
# + hide_input=true
show_doc(PIL)
# -
# `PIL`
# + hide_input=true
show_doc(abc)
# -
# `abc`
# + hide_input=true
show_doc(collections)
# -
# `collections`
# + hide_input=true
show_doc(csv)
# -
# `csv`
# + hide_input=true
show_doc(fp16)
# -
# `fp16`
# + hide_input=true
show_doc(general_sched)
# -
# `general_sched`
# + hide_input=true
show_doc(gzip)
# -
# `gzip`
# + hide_input=true
show_doc(hashlib)
# -
# `hashlib`
# + hide_input=true
show_doc(hooks)
# -
# `hooks`
# + hide_input=true
show_doc(html)
# -
# `html`
# + hide_input=true
show_doc(inspect)
# -
# `inspect`
# + hide_input=true
show_doc(lr_finder)
# -
# `lr_finder`
# + hide_input=true
show_doc(math)
# -
# `math`
# + hide_input=true
show_doc(mimetypes)
# -
# `mimetypes`
# + hide_input=true
show_doc(mixup)
# -
# `mixup`
# + hide_input=true
show_doc(nn)
# -
# `nn`
# + hide_input=true
show_doc(np)
# -
# `np`
# + hide_input=true
show_doc(one_cycle)
# -
# `one_cycle`
# + hide_input=true
show_doc(operator)
# -
# `operator`
# + hide_input=true
show_doc(optim)
# -
# `optim`
# + hide_input=true
show_doc(os)
# -
# `os`
# + hide_input=true
show_doc(patches)
# -
# `patches`
# + hide_input=true
show_doc(patheffects)
# -
# `patheffects`
# + hide_input=true
show_doc(pd)
# -
# `pd`
# + hide_input=true
show_doc(pickle)
# -
# `pickle`
# + hide_input=true
show_doc(plt)
# -
# `plt`
# + hide_input=true
show_doc(random)
# -
# `random`
# + hide_input=true
show_doc(re)
# -
# `re`
# + hide_input=true
show_doc(rnn)
# -
# `rnn`
# + hide_input=true
show_doc(scipy)
# -
# `scipy`
# + hide_input=true
show_doc(shutil)
# -
# `shutil`
# + hide_input=true
show_doc(spacy)
# -
# `spacy`
# + hide_input=true
show_doc(torch)
# -
# `torch`
# + hide_input=true
show_doc(tvm)
# -
# `tvm`
# + hide_input=true
show_doc(typing)
# -
# `typing`
# + hide_input=true
show_doc(warnings)
# -
# `warnings`
| docs_src/callbacks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={}
# # Project Euler: Problem 2
# + [markdown] nbgrader={}
# Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 0 and 1, the first 12 terms will be:
#
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
#
# By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# YOUR CODE HERE
# FIX: the previous version generated the sequence by mutating a list built
# from the magic range(-30, 2) -- a hack the author noted they did not
# understand. Generate Fibonacci terms directly up to the limit instead.
def fib_up_to(limit):
    """Yield Fibonacci numbers (starting 0, 1) that do not exceed `limit`."""
    a, b = 0, 1
    while a <= limit:
        yield a
        a, b = b, a + b

def evens(x):
    """Truthy filter predicate: return x when it is even (note: drops 0,
    which is falsy; 0 does not affect the sum)."""
    if x % 2 == 0:
        return x

fibo = list(fib_up_to(4_000_000))
print(fibo)
new = list(filter(evens, fibo))  # even-valued terms only
print(new)
print(sum(new))  # Project Euler #2 answer
# + deletable=false nbgrader={"checksum": "e8afe8a5735f0fff949b706895f8583d", "grade": true, "grade_id": "projecteuler2", "points": 10}
# This cell will be used for grading, leave it at the end of the notebook.
# -
| assignments/assignment02/ProjectEuler2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies and Setup
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')
# File to Load (Remember to Change These)
mouse_drug_data_to_load = "data/mouse_drug_data.csv"
clinical_trial_data_to_load = "data/clinicaltrial_data.csv"
# Read the Mouse and Drug Data and the Clinical Trial Data
mouse_drug_data = pd.read_csv(mouse_drug_data_to_load)
clinical_trial_data = pd.read_csv(clinical_trial_data_to_load)
# mouse_drug_data.head()
# clinical_trial_data.head()
# Combine the data into a single dataset
merged_data = pd.merge(clinical_trial_data,mouse_drug_data,how="left", on="Mouse ID")
# Display the data table for preview
merged_data.head()
# -
# ## Tumor Response to Treatment
# +
# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint
t_response_mean = merged_data.groupby(['Drug', 'Timepoint'])['Tumor Volume (mm3)'].mean()
t_response_mean
# Convert to DataFrame
tumor_response_df = pd.DataFrame(t_response_mean)
tumor_response_df = tumor_response_df.reset_index(drop=False)
# Preview DataFrame
tumor_response_df
# -
# +
# Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint
t_error = merged_data.groupby(['Drug', 'Timepoint'])['Tumor Volume (mm3)'].sem()
t_error
# Convert to DataFrame
tumor_response_error = pd.DataFrame(t_error)
tumor_response_error = tumor_response_error.reset_index(drop=False)
# Preview DataFrame
tumor_response_error.head()
# -
# +
# Minor Data Munging to Re-Format the Data Frames
tumor_response_df_pivot = tumor_response_df.copy()
# Preview that Reformatting worked
tumor_response_df_pivot.pivot(index="Timepoint",columns="Drug",values="Tumor Volume (mm3)")
# -
# +
def _drug_series(source, drug):
    """Tumor-volume series for one drug from a (Drug, Tumor Volume) frame."""
    return source.loc[source["Drug"] == drug, "Tumor Volume (mm3)"]

# Mean tumor volume and standard error for each of the four plotted drugs.
# (Replaces eight copy-pasted .loc statements with one helper.)
capomulin_means = _drug_series(tumor_response_df, "Capomulin")
capomulin_error = _drug_series(tumor_response_error, "Capomulin")
ketapril_means = _drug_series(tumor_response_df, "Ketapril")
ketapril_error = _drug_series(tumor_response_error, "Ketapril")
infubinol_means = _drug_series(tumor_response_df, "Infubinol")
infubinol_error = _drug_series(tumor_response_error, "Infubinol")
placebo_means = _drug_series(tumor_response_df, "Placebo")
placebo_error = _drug_series(tumor_response_error, "Placebo")
# +
# Generate the Plot (with Error Bars)
#Define x-axis: the study timepoints, 0..45 in steps of 5 days
x_axis = np.arange(0,50,5)
#Plot mean tumor volume per timepoint with SEM error bars, one series per drug
plt.errorbar(x_axis, capomulin_means, capomulin_error,marker = "o",linestyle="dashed",color="red",label = "Capomulin")
plt.errorbar(x_axis, ketapril_means, ketapril_error,marker = "s",linestyle="dashed",color="green",label = "Ketapril")
plt.errorbar(x_axis, infubinol_means, infubinol_error,marker = "^",linestyle="dashed",color="blue",label = "Infubinol")
plt.errorbar(x_axis, placebo_means, placebo_error,marker = "D",linestyle="dashed",color="black",label = "Placebo")
#Set limits
plt.ylim(30, 80)
plt.xlim(0, (max(x_axis)+2))
#Labels and Title
plt.xlabel("Time(Days)")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Tumor Response to Treatment")
#Legend
plt.legend(loc="best")
#Grid only on y axis
plt.grid(axis="y")
plt.tight_layout()
#Save Figure (must happen before plt.show(), which clears the figure)
plt.savefig("treatment.png")
# -
# Show the Figure
plt.show()
# 
# ## Metastatic Response to Treatment
# +
# Store the Mean Met. Site Data Grouped by Drug and Timepoint
metastatic_data = merged_data[["Mouse ID","Timepoint","Metastatic Sites","Drug"]].copy()
metastatic_data
meta_mean = metastatic_data.groupby(['Drug', 'Timepoint'])['Metastatic Sites'].mean()
meta_mean
# Convert to DataFrame
metastatic_mean_df = pd.DataFrame(meta_mean)
# Flatten the (Drug, Timepoint) MultiIndex into ordinary columns
metastatic_mean_df = metastatic_mean_df.reset_index(drop=False)
# Preview DataFrame
metastatic_mean_df.head()
# -
# +
# Store the Standard Error associated with Met. Sites Grouped by Drug and Timepoint
metastatic_error = metastatic_data.groupby(['Drug', 'Timepoint'])['Metastatic Sites'].sem()
metastatic_error.head()
# Convert to DataFrame
metastatic_error_df = pd.DataFrame(metastatic_error)
# Preview DataFrame
metastatic_error_df.head()
# -
# Minor Data Munging to Re-Format the Data Frames
# Preview that Reformatting worked (the pivoted result is only displayed)
metastatic_error_df = metastatic_error_df.reset_index()
metastatic_mean_df_pivot = metastatic_mean_df.copy()
metastatic_mean_df_pivot.pivot(index="Timepoint",columns="Drug",values="Metastatic Sites")
# +
# Per-drug metastatic-site means and standard errors for the plot below.
# Collapses the original copy-pasted .loc filters into one helper + loop
# while keeping the variable names the plotting cell expects.
_meta_drugs = ("Capomulin", "Ketapril", "Infubinol", "Placebo")

def _drug_meta_series(df, drug):
    """Return the 'Metastatic Sites' column of *df* for one drug."""
    return df.loc[df["Drug"] == drug, "Metastatic Sites"]

# Mean metastatic-site count per timepoint, one Series per drug
capomulin_mmeans, ketapril_mmeans, infubinol_mmeans, placebo_mmeans = [
    _drug_meta_series(metastatic_mean_df, d) for d in _meta_drugs
]
# Standard error per timepoint, one Series per drug
capomulin_merror, ketapril_merror, infubinol_merror, placebo_merror = [
    _drug_meta_series(metastatic_error_df, d) for d in _meta_drugs
]
# +
# Generate the Plot (with Error Bars)
#Define x_axis: the study timepoints, 0..45 in steps of 5 days
x_axis1 = np.arange(0,50,5)
#Plot mean metastatic-site counts with SEM error bars, one series per drug
plt.errorbar(x_axis1, capomulin_mmeans, capomulin_merror,marker = "o",linestyle="dashed",color="red",label = "Capomulin")
plt.errorbar(x_axis1, ketapril_mmeans, ketapril_merror,marker = "s",linestyle="dashed",color="green",label = "Ketapril")
plt.errorbar(x_axis1, infubinol_mmeans, infubinol_merror,marker = "^",linestyle="dashed",color="blue",label = "Infubinol")
plt.errorbar(x_axis1, placebo_mmeans, placebo_merror,marker = "D",linestyle="dashed",color="black",label = "Placebo")
#Set limits
plt.ylim(0,4)
plt.xlim(0,max(x_axis1)+2)
#Labels and Title
plt.xlabel("Treatment Duration(Days)")
plt.ylabel("Met. Sites")
plt.title("Metastatic Spread During Treatment")
#Legend
plt.legend(loc="best")
#Grid only on y axis
plt.grid(axis="y")
plt.tight_layout()
# Save the Figure (before plt.show(), which clears the figure)
plt.savefig("metastatic_spread.png")
# -
# 
# Show the Figure
plt.show()
# ## Survival Rates
# +
# Store the Count of Mice Grouped by Drug and Timepoint (we can pass any metric;
# counting "Mouse ID" gives the number of surviving mice per group)
mice_data_df=merged_data[["Mouse ID","Timepoint","Drug"]].copy()
mice_data_df
mice_count=mice_data_df.groupby(['Drug', 'Timepoint']).count()
mice_count.head()
# Convert to DataFrame
survival_rate=pd.DataFrame(mice_count)
survival_rate
# Flatten the (Drug, Timepoint) MultiIndex into ordinary columns
survival_rate=survival_rate.reset_index(drop=False)
# Preview DataFrame
survival_rate.head(5)
# -
# +
# Minor Data Munging to Re-Format the Data Frames
# Preview the Data Frame (the pivoted result is only displayed, not stored)
survival_rate_df=survival_rate.copy()
survival_rate_df.pivot(index="Timepoint",columns="Drug",values="Mouse ID")
# -
#Calculate the percentages
# NOTE(review): every count is divided by the single global maximum of the
# "Mouse ID" counts, which assumes all cohorts start with the same number of
# mice — a per-drug baseline would be safer. TODO confirm against the data.
survival_percent_df = pd.DataFrame({"Drug" : survival_rate["Drug"],
                                    "Timepoint": survival_rate["Timepoint"],
                                    "Survival Percentage" :
                                    survival_rate["Mouse ID"] /survival_rate["Mouse ID"].max() * 100})
survival_percent_df.head()
# Per-drug survival-percentage series for the plot below
p_capomulin=survival_percent_df.loc[survival_percent_df["Drug"]=="Capomulin","Survival Percentage"]
p_capomulin
p_ketapril=survival_percent_df.loc[survival_percent_df["Drug"]=="Ketapril","Survival Percentage"]
p_ketapril
p_infubinol=survival_percent_df.loc[survival_percent_df["Drug"]=="Infubinol","Survival Percentage"]
p_infubinol
p_placebo=survival_percent_df.loc[survival_percent_df["Drug"]=="Placebo","Survival Percentage"]
p_placebo
# +
# Generate the Plot (Accounting for percentages)
# x positions: the study timepoints, 0..45 in steps of 5 days
x_axis2 = np.arange(0,50,5)
#Plot — errorbar is called without yerr here, so it draws plain dashed lines
plt.errorbar(x_axis2, p_capomulin, marker = "o",linestyle="dashed",color="red",label = "Capomulin")
plt.errorbar(x_axis2, p_ketapril, marker = "s",linestyle="dashed",color="green",label = "Ketapril")
plt.errorbar(x_axis2, p_infubinol, marker = "^",linestyle="dashed",color="blue",label = "Infubinol")
plt.errorbar(x_axis2, p_placebo, marker = "D",linestyle="dashed",color="black",label = "Placebo")
#Set limits
plt.ylim(0,100)
plt.xlim(0, 50)
#Labels and Title
plt.xlabel("Time (Days)")
plt.ylabel("Survival Rate (%)")
plt.title("Survival During Treatment")
#Legend
plt.legend(loc="best")
#Grid (note: unlike the earlier cells this enables the grid on both axes)
plt.grid()
plt.tight_layout()
# Save the Figure (before plt.show(), which clears the figure)
plt.savefig("survival.png")
# Show the Figure
plt.show()
# 
# ## Summary Bar Graph
# +
# Calculate the percent changes for each drug between the first and last
# timepoint.  The original repeated the same four lines for all ten drugs;
# collapsed here into one helper + loop, keeping every cp_* name alive for
# the cells below.
#
# NOTE(review): the original divides by max_tpoint (the last Timepoint, i.e.
# 45) rather than by the baseline tumor volume v[0].  In this dataset every
# cohort appears to start at 45.0 mm3, so the numbers coincide, but a true
# percent change would divide by v[0] — TODO confirm against the raw data.
max_tpoint = tumor_response_df["Timepoint"].max()

_all_drugs = ["Capomulin", "Ketapril", "Naftisol", "Infubinol", "Stelasyn",
              "Ramicane", "Propriva", "Zoniferol", "Placebo", "Ceftamin"]

def _percent_change(drug):
    """Percent change in mean tumor volume from first to last timepoint."""
    v = tumor_response_df.loc[
        tumor_response_df["Drug"] == drug, "Tumor Volume (mm3)"
    ].reset_index(drop=True)
    # v[0] is the first timepoint's mean, v[9] the last (10 timepoints/drug)
    return (v[9] - v[0]) / max_tpoint * 100

_changes = {drug: _percent_change(drug) for drug in _all_drugs}

# Keep the original per-drug variable names used by the summary-chart cell
cp_Capomulin = _changes["Capomulin"]
cp_Ketapril = _changes["Ketapril"]
cp_Naftisol = _changes["Naftisol"]
cp_Infubinol = _changes["Infubinol"]
cp_Stelasyn = _changes["Stelasyn"]
cp_Ramicane = _changes["Ramicane"]
cp_Propriva = _changes["Propriva"]
cp_Zoniferol = _changes["Zoniferol"]
cp_Placebo = _changes["Placebo"]
cp_Ceftamin = _changes["Ceftamin"]

#Create dataframe summarising percent change per drug
summary_df = pd.DataFrame({"Drug": _all_drugs,
                           "Percent Change": [_changes[d] for d in _all_drugs]})
# Display the data to confirm
summary_df
# -
# +
# Summary bar chart: percent tumor-volume change for the four key drugs.
# Store all Relevant Percent Changes into a Tuple
summary_tuple = (round(cp_Capomulin), round(cp_Infubinol),
                 round(cp_Ketapril), round(cp_Placebo))
drugs_tuple = ("Capomulin", "Infubinol", "Ketapril", "Placebo")
#Define x_axis: one bar position per drug
x_axis = np.arange(0, len(summary_tuple))
#Define colors: red for tumor growth (+), green for shrinkage (-)
colors = ["red" if change > 0 else "green" for change in summary_tuple]
fig, ax = plt.subplots()
#Plot the bar graph
ax.bar(x_axis, summary_tuple, color=colors, align="center", width=1.0)
#Plot the ticks, one label per bar
plt.xticks(x_axis, drugs_tuple)
#Grid
ax.grid()
ax.set_xlabel("Drugs")
ax.set_ylabel("% Tumor Volume Change")
ax.set_title("Tumor Change Over 45 Day Treatment")
#Set the x-axis and y-axis limits
ax.set_xlim(-0.5, len(drugs_tuple))
ax.set_ylim(min(summary_tuple) - 10, max(summary_tuple) + 10)
#Adding value labels to bars.  BUGFIX: the original used the loop index for
#the y coordinate (y=i+10 / y=i-10), which placed the labels at arbitrary
#heights; anchor each label halfway up its own bar instead.
for i, change in enumerate(summary_tuple):
    plt.text(x=i, y=change / 2, s=str(change) + "%", size=9, color="white")
# Save the Figure
plt.savefig("Tumor_Change.png")
# Show the Figure
fig.show()
# -
# 
# # Observations & Analysis
# The Drug Capomulin: (a) It is observed that the drug Capomulin is the most effective of all the drugs. (b) As the days progressed, it was observed that this drug decreased the tumor volume. (c) The metastatic spread to other sites during the course of the treatment was very low compared to the other drugs. (d) The survival rate was the highest with the use of this drug.
#
# The Drug Ketapril: (a) It is observed that the drug Ketapril was the least effective among all the drugs. (b) As the days progressed, the tumor volume increased during the treatment. (c) The metastatic spread to the other sites during the course of the treatment was very high. (d) The survival rate of the mice was also observed to be low with the use of this drug.
| Pymaceuticals/pymaceuticals_starter-Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp nets
# -
# # nets
# > The neural network architectures.
# Our implementation of the networks will follow this architecture:
#
# 
#export
import torch
from torch import nn
import torch.nn.functional as F
# ## Feature extraction
#
# When building complex networks it's better to build and test the smaller components first, then combine them together. This way we can also reuse the individual parts easily.
# ### Convolutional block
#
# This block takes the descriptor or fingerprint maps as input, and returns outputs of a max pooling layer.
#
# - Descriptor: `13*37*37` -> `48*37*37` -> `48*19*19`
# - Fingerprint: `3*37*36` -> `48*37*36` -> `48*19*18`
#export
class Convnet(nn.Module):
    """Convolutional feature extraction Block.

    Conv2d (with 'same' padding) followed by ReLU and a 3x3/stride-2
    max-pool, mapping C_in -> C_out channels while roughly halving the
    spatial dimensions.
    """

    def __init__(self, C_in=13, C_out=48, conv_size=13):
        super().__init__()
        layers = [
            nn.Conv2d(C_in, C_out, kernel_size=conv_size, stride=1, padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        # (N, C_in, H, W) -> (N, C_out, ceil(H/2), ceil(W/2))
        return self.conv(x)
# Let's test it on the descriptor and fingerprint maps
# +
# Descriptor-shaped input: expect (10, 48, 19, 19)
convnet = Convnet()
i = torch.rand((10, 13, 37, 37))
o = convnet(i)
o.shape
# +
# Fingerprint-shaped input: expect (10, 48, 19, 18)
convnet = Convnet(3, 48)
i = torch.rand((10, 3, 37, 36))
o = convnet(i)
o.shape
# -
# ### Inception block
#
# After the convolutional block, the resulting feature maps will further pass through some inception blocks.
#
# The inceptions implemented here are the naïve Google inceptions. It passes the input through multiple convolutional layers and then concatenate the output. This inception block is actually two smaller inception blocks bridged with a max pooling layer. First the small inception block:
#
# - Descriptor: `48*19*19` -> 3 outputs of `32*19*19` -> `96*19*19`, |-> `96*10*10` -> 3 outputs of `64*10*10` -> `192*10*10`
# - Fingerprint: `48*19*18` -> 3 outputs of `32*19*18` -> `96*19*18`, |-> `96*10*9` -> 3 outputs of `64*10*9` -> `192*10*9`
#
#
#export
class Inception(nn.Module):
    """Naive Google Inception Block.

    Three parallel conv+ReLU branches with 5x5, 3x3 and 1x1 kernels, all
    with 'same' padding, concatenated along the channel dimension so the
    output has 3 * C_out channels.
    """

    def __init__(self, C_in=48, C_out=32, stride=1):
        super().__init__()

        def branch(kernel):
            return nn.Sequential(
                nn.Conv2d(C_in, C_out, kernel_size=kernel, stride=stride, padding='same'),
                nn.ReLU(),
            )

        # Branch order (5, 3, 1) fixes the channel layout of the output.
        self.conv1 = branch(5)
        self.conv2 = branch(3)
        self.conv3 = branch(1)

    def forward(self, x):
        outputs = [b(x) for b in (self.conv1, self.conv2, self.conv3)]
        return torch.cat(outputs, dim=1)
# +
# Descriptor path, first inception: 48 -> 3*32 = 96 channels
inception = Inception()
i = torch.rand((10, 48, 19, 19))
o = inception(i)
o.shape
# +
# Descriptor path, second inception: 96 -> 3*64 = 192 channels
inception = Inception(96, 64)
i = torch.rand((10, 96, 10, 10))
o = inception(i)
o.shape
# +
# Fingerprint path, first inception (non-square spatial size)
inception = Inception()
i = torch.rand((10, 48, 19, 18))
o = inception(i)
o.shape
# +
# Fingerprint path, second inception
inception = Inception(96, 64)
i = torch.rand((10, 96, 10, 9))
o = inception(i)
o.shape
# -
#
# And the double inception block:
#
#export
class DoubleInception(nn.Module):
    """Double Inception Block.

    Two Inception blocks bridged by a 3x3/stride-2 max-pool, so the
    spatial size is roughly halved between them.
    """

    def __init__(self, C_in1=48, C_out1=32, stride1=1, C_in2=96, C_out2=64, stride2=1):
        super().__init__()
        self.inception1 = Inception(C_in1, C_out1, stride1)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.inception2 = Inception(C_in2, C_out2, stride2)

    def forward(self, x):
        # Chain the three stages in order.
        for stage in (self.inception1, self.maxpool, self.inception2):
            x = stage(x)
        return x
# +
# Descriptor path: (10, 48, 19, 19) -> (10, 192, 10, 10)
double_inception = DoubleInception()
i = torch.rand((10, 48, 19, 19))
o = double_inception(i)
o.shape
# +
# Fingerprint path: (10, 48, 19, 18) -> (10, 192, 10, 9)
double_inception = DoubleInception()
i = torch.rand((10, 48, 19, 18))
o = double_inception(i)
o.shape
# -
# ### Global max pooling
#
# There is no global max pooling layer in PyTorch but this is very easy to realise.
i = torch.rand((10, 192, 10, 10))
# amax over the last two (spatial) dims acts as a global max pool -> (10, 192)
o = i.amax(dim=(-1, -2))
o.shape
# ### Fully connected block
#
# At the end of the network the data passes through several fully connected layers.
#
# If the MolMap network is single path:
#
# - `192` -> `128` -> `32`
#
# And if double path:
#
# - `384` -> `256` -> `128` -> `32`
#export
class SinglePathFullyConnected(nn.Module):
    """Fully connected layers for single path MolMap nets.

    Two dense layers (C1 -> C2 -> C3) with a ReLU in between; no
    activation after the final layer, so the output is a raw embedding.
    """

    def __init__(self, C1=192, C2=128, C3=32):
        super().__init__()
        self.fc = nn.Sequential(nn.Linear(C1, C2), nn.ReLU(), nn.Linear(C2, C3))

    def forward(self, x):
        return self.fc(x)
# +
# (10, 192) -> (10, 32)
single_path_fully_connected = SinglePathFullyConnected()
i = torch.rand((10, 192))
o = single_path_fully_connected(i)
o.shape
# -
#export
class DoublePathFullyConnected(nn.Module):
    """Fully connected layers for double paths MolMap nets.

    Three dense layers (C1 -> C2 -> C3 -> C4) with ReLUs between them;
    no activation after the final layer.
    """

    def __init__(self, C1=384, C2=256, C3=128, C4=32):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(C1, C2),
            nn.ReLU(),
            nn.Linear(C2, C3),
            nn.ReLU(),
            nn.Linear(C3, C4),
        )

    def forward(self, x):
        return self.fc(x)
# +
# (10, 384) -> (10, 32)
double_path_fully_connected = DoublePathFullyConnected()
i = torch.rand((10, 384))
o = double_path_fully_connected(i)
o.shape
# -
# ## Single Path Molecular Mapping network
#
# Descriptor map or Fingerprint map only. The two feature maps use identical network structures and only differ in data shape. Note that we need to specify the number of channels for the feature maps when initialising the model, but the model should be able to handle feature maps with different dimensions.
#
# - descriptor: `13*37*37` -> `32`
# - fingerprint: `3*37*36` -> `32`
#
# The output layer is not included.
#export
class SinglePathMolMapNet(nn.Module):
    """Single Path Molecular Mapping Network.

    Convnet stem -> DoubleInception -> global max pool -> fully connected,
    producing an FC[-1]-dimensional embedding (output layer not included).

    Args:
        conv_in: channel count of the input feature map (13 descriptor /
            3 fingerprint).
        conv_size: kernel size of the stem convolution.
        FC: sizes of the two fully connected layers.  Default changed from
            a mutable list to a tuple (it is only indexed, never mutated),
            avoiding the shared-mutable-default pitfall.
    """

    def __init__(self, conv_in=13, conv_size=13, FC=(128, 32)):
        super(SinglePathMolMapNet, self).__init__()
        # output channels of the two inception stages
        C_out1, C_out2 = 32, 64
        self.conv = Convnet(C_in=conv_in, C_out=48, conv_size=conv_size)
        self.double_inception = DoubleInception(C_in1=48, C_out1=C_out1,
                                                C_in2=C_out1*3, C_out2=C_out2)
        # Inception output has 3 * C_out2 channels after concatenation
        self.fully_connected = SinglePathFullyConnected(C1=C_out2*3, C2=FC[0], C3=FC[1])

    def forward(self, x):
        x = self.conv(x)
        x = self.double_inception(x)
        # Global max pooling over the two spatial dimensions
        x = x.amax(dim=(-1, -2))
        return self.fully_connected(x)
# +
# Descriptor map: (10, 13, 37, 37) -> 32-dim embedding
single_path = SinglePathMolMapNet()
i = torch.rand((10, 13, 37, 37))
o = single_path(i)
o.shape
# +
# Fingerprint map: (10, 3, 37, 36) -> 32-dim embedding
single_path = SinglePathMolMapNet(conv_in=3)
i = torch.rand((10, 3, 37, 36))
o = single_path(i)
o.shape
# -
# ## Double Path Molecular Mapping network
#
# Both the descriptor map and Fingerprint map will pass through the convolutional block, then the double inception block, and their results are then combined, before finally pass through the fully connected layers.
#
# After convolutional and double inception block:
#
# - descriptor: `13*37*37` -> `192*10*10`
# - fingerprint: `3*37*36` -> `192*10*9`
#
# After global max pooling:
#
# - descriptor: `192*10*10` -> `192`
# - fingerprint: `192*10*9` -> `192`
#
# After Concatenation and fully connected blocks:
#
# - `192 + 192` -> `384` -> `32`
#
# The output layer is not included.
#export
class DoublePathMolMapNet(nn.Module):
    """Double Path Molecular Mapping Network.

    Each input (descriptor and fingerprint map) passes through its own
    Convnet stem, then the inception stage and a global max pool; the two
    pooled vectors are concatenated and fed to the fully connected layers.

    Args:
        conv_in1: channel count of the descriptor map.
        conv_in2: channel count of the fingerprint map.
        conv_size: kernel size of both stem convolutions.
        FC: sizes of the three fully connected layers.  Default changed
            from a mutable list to a tuple (it is only indexed, never
            mutated), avoiding the shared-mutable-default pitfall.
    """

    def __init__(self, conv_in1=13, conv_in2=3, conv_size=13, FC=(256, 128, 32)):
        super(DoublePathMolMapNet, self).__init__()
        # output channels of the two inception stages
        C_out1, C_out2 = 32, 64
        self.conv1 = Convnet(C_in=conv_in1, C_out=48, conv_size=conv_size)
        self.conv2 = Convnet(C_in=conv_in2, C_out=48, conv_size=conv_size)
        # NOTE(review): a single DoubleInception instance is applied to both
        # paths, so its weights are shared between the descriptor and
        # fingerprint features — confirm this sharing is intentional.
        self.double_inception = DoubleInception(C_in1=48, C_out1=C_out1,
                                                C_in2=C_out1*3, C_out2=C_out2)
        # Concatenated pooled features: 2 paths * 3*C_out2 channels each
        self.fully_connected = DoublePathFullyConnected(C1=C_out2*6, C2=FC[0],
                                                        C3=FC[1], C4=FC[2])

    def forward(self, x1, x2):
        """x1: descriptor maps, x2: fingerprint maps -> (N, FC[-1]) output."""
        x1 = self.conv1(x1)
        x1 = self.double_inception(x1)
        x1 = x1.amax(dim=(-1, -2))   # global max pool over spatial dims
        x2 = self.conv2(x2)
        x2 = self.double_inception(x2)
        x2 = x2.amax(dim=(-1, -2))
        x = torch.cat((x1, x2), dim=1)
        return self.fully_connected(x)
# +
# Descriptor + fingerprint maps in, one (10, 32) embedding out
double_path = DoublePathMolMapNet()
i1 = torch.rand((10, 13, 37, 37))
i2 = torch.rand((10, 3, 37, 36))
o = double_path(i1, i2)
o.shape
# -
# ## Resnet block
#
# Currently not used
#export
class Resnet(nn.Module):
    """Residual Block: two conv+BatchNorm stages with an identity skip.

    (The original docstring was a copy-paste of the Inception block's.)
    Channel count C and the spatial size are preserved, so the input can
    be added to the transformed output (F(x) + x) before the final ReLU.

    Args:
        C: number of input/output channels.
        conv_size: square kernel size of both convolutions.
    """

    def __init__(self, C, conv_size):
        super(Resnet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(C, C, kernel_size=conv_size, stride=1, padding='same'),
            nn.BatchNorm2d(C),
            nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(C, C, kernel_size=conv_size, stride=1, padding='same'),
            nn.BatchNorm2d(C)
        )

    def forward(self, x):
        # Standard residual formulation: relu(F(x) + x)
        o = self.conv1(x)
        o = self.conv2(o)
        o = o + x
        return F.relu(o)
# +
# Residual block preserves the input shape: (10, 48, 19, 18) in and out
resnet = Resnet(48, 5)
i = torch.rand((10, 48, 19, 18))
o = resnet(i)
o.shape
# -
| 01_nets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # OpenPyXL Library Basics
# Source: [https://github.com/d-insight/code-bank.git](https://github.com/d-insight/code-bank.git)
# License: [MIT License](https://opensource.org/licenses/MIT). See open source [license](LICENSE) in the Code Bank repository.
# A Python library to read/write Excel 2010 xlsx/xlsm files. For more documentation, see: https://openpyxl.readthedocs.io/en/stable/
# -------------
# ## Install
# +
# #!python -m pip install openpyxl
# -
from openpyxl import load_workbook
from openpyxl import Workbook
# ## Files
# Input workbook used by the read examples below
path = "data/"
file = path + "SalesSampleData.xlsx"
# ## Getting Sheets from a Workbook
workbook = load_workbook(filename=file)
# The "active" sheet is the one selected when the file was last saved
sheet = workbook.active
print(f'Worksheet names: {workbook.sheetnames}')
print(f'Active worksheet: {sheet}')
print(f'The title of the Active Worksheet is: {sheet.title}')
# ## Reading Cell Data
cell = sheet['B3']
print(cell.value)
sheet["A5"].value
# ## Iterating Over Rows and Columns
# Walk down column A, stopping after row 10
for cell in sheet['A']:
    print(cell.value)
    if cell.row == 10:
        break
# Alternative: iterate the 5x3 top-left block, yielding a value tuple per row
for value in sheet.iter_rows(
    min_row=1, max_row=5,
    min_col=1, max_col=3,
    values_only=True,
):
    print(value)
# ## Writing Excel Spreadsheets
# Initiate a New Workbook
workbook = Workbook()
# Select the sheet to write on (the active one)
sheet = workbook.active
sheet['A1'] = 'Hello'
sheet['A2'] = 'from'
sheet['A3'] = 'Me'
# Save the workbook
workbook.save(path + "new_workbook.xlsx")
# ## Adding and Removing Sheets
workbook.create_sheet()
print(workbook.sheetnames)
# Insert a named sheet at position 1
workbook.create_sheet(index=1, title='Second sheet')
print(workbook.sheetnames)
workbook.save(path + "new_workbook.xlsx")
# Let's delete some sheets (deletion is by sheet name)
print(workbook.sheetnames)
del workbook['Second sheet']
print(workbook.sheetnames)
workbook.save(path + "new_workbook.xlsx")
# ## Adding and Deleting Rows and Columns
# insert a column before A (on the active sheet)
sheet.insert_cols(idx=1)
# insert 2 rows starting on the second row
sheet.insert_rows(idx=2, amount=2)
workbook.save(path + "new_workbook.xlsx")
# Delete column A
sheet.delete_cols(idx=1)
# delete 2 rows starting on the second row
sheet.delete_rows(idx=2, amount=2)
workbook.save(path + "new_workbook.xlsx")
# _________________________________________________________________________________________________________
# ### Functions
# +
def open_workbook(path):
    """Load the workbook at *path* and report its sheets and active title."""
    wb = load_workbook(filename=path)
    print(f'Worksheet names: {wb.sheetnames}')
    active = wb.active
    print(active)
    print(f'The title of the Active Worksheet is: {active.title}')
def get_cell_info(path):
    """Print the active sheet's title and the values of a few sample cells."""
    sheet = load_workbook(filename=path).active
    print(sheet)
    print(f'The title of the Worksheet is: {sheet.title}')
    # The f-string '=' specifier echoes the expression text with its value,
    # so the variable names below are part of the printed output.
    print(f'The value of {sheet["A5"].value=}')
    print(f'The value of {sheet["C6"].value=}')
    cell = sheet['B3']
    print(f'{cell.value=}')
def iterating_range(path):
    """Print the first ten cell objects of column A on the active sheet."""
    sheet = load_workbook(filename=path).active
    # A column slice yields cells top-to-bottom; stop after printing row 10.
    for c in sheet['A']:
        print(c)
        if c.row == 10:
            break
def iterating_over_values(path):
    """Print the cell values of the 5x3 top-left block, one tuple per row."""
    sheet = load_workbook(filename=path).active
    rows = sheet.iter_rows(min_row=1, max_row=5,
                           min_col=1, max_col=3,
                           values_only=True)
    for row in rows:
        print(row)
def create_workbook(path):
    """Create a workbook with a three-cell greeting and save it to *path*."""
    wb = Workbook()
    ws = wb.active
    for ref, text in (('A1', 'Hello'), ('A2', 'from'), ('A3', 'OpenPyXL')):
        ws[ref] = text
    wb.save(path)
def create_worksheets(path):
    """Append and insert worksheets, printing the sheet list at each step."""
    wb = Workbook()
    print(wb.sheetnames)
    # Append a new worksheet at the end
    wb.create_sheet()
    print(wb.sheetnames)
    # Insert a named worksheet at position 1
    wb.create_sheet(index=1, title='Second sheet')
    print(wb.sheetnames)
    wb.save(path)
def remove_worksheets(path):
    """Create extra sheets, then delete one by name and save to *path*."""
    wb = Workbook()
    wb.create_sheet()
    # Insert a named worksheet at position 1
    wb.create_sheet(index=1, title='Second sheet')
    print(wb.sheetnames)
    # Delete by name (wb.remove(sheet) would work as well)
    del wb['Second sheet']
    print(wb.sheetnames)
    wb.save(path)
def inserting_cols_rows(path):
    """Write three cells, then insert a column and two rows before saving."""
    wb = Workbook()
    ws = wb.active
    for ref, text in (('A1', 'Hello'), ('A2', 'from'), ('A3', 'OpenPyXL')):
        ws[ref] = text
    # insert a new empty column before A
    ws.insert_cols(idx=1)
    # insert 2 empty rows starting on the second row
    ws.insert_rows(idx=2, amount=2)
    wb.save(path)
def deleting_cols_rows(path):
    """Populate a small sheet, then delete column A and two rows."""
    wb = Workbook()
    ws = wb.active
    cells = {'A1': 'Hello', 'B1': 'from', 'C1': 'OpenPyXL',
             'A2': 'row 2', 'A3': 'row 3', 'A4': 'row 4'}
    for ref, text in cells.items():
        ws[ref] = text
    # Delete column A
    ws.delete_cols(idx=1)
    # delete 2 rows starting on the second row
    ws.delete_rows(idx=2, amount=2)
    wb.save(path)
| _development/tutorials/automate-excel/2-OpenPyXL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from npknn import NonparametricKNN
#Data
#Data: five 3-d training points with matching scalar targets, two queries
train = np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5]])
target = np.array([1,2,3,7,17])
test = np.array([[3,3,3],[6,6,6]])
#Initialize model
model = NonparametricKNN(n_neighbors=3,loss='L2')
#Train
model.fit(train,target)
#Predict
model.predict(test)
#When we use L2 loss, we get exactly the same result as the KNN regressor in sklearn.
# NOTE(review): KNeighborsRegressor is imported for comparison but never
# fitted here — presumably left for the reader to try; confirm.
from sklearn.neighbors import KNeighborsRegressor
model = NonparametricKNN(n_neighbors=3)
model.fit(train,target)
model.predict(test)
#When we use other loss, predictions could be different.
model = NonparametricKNN(n_neighbors=3,loss='L1')
model.fit(train,target)
model.predict(test)
#Customized loss: any callable taking two numpy arrays and returning a scalar
def loss(pred, true):
    """Mean quartic (L4) error between *pred* and *true* numpy arrays."""
    quartic = (pred - true) ** 4
    return quartic.mean()  # scalar
# Pass the custom callable in place of a built-in loss name.
model = NonparametricKNN(n_neighbors=3,loss=loss)#Pass the loss function as argument.
model.fit(train,target)
model.predict(test)
| examples.ipynb |